From d7ce55ed1b701d7266cf7a1fa958edb2b7c18718 Mon Sep 17 00:00:00 2001
From: Philippe Hamelin
Date: Wed, 6 Apr 2011 13:32:58 -0400
Subject: [PATCH] Add ARMv6 and ARMv7 support.

---
 rtt/os/CMakeLists.txt         |    1 +
 rtt/os/oro_arm/CMakeLists.txt |    6 +
 rtt/os/oro_arm/oro_atomic.h   |  207 +++++++++++++++++++++++++++++++++++++++++
 rtt/os/oro_arm/oro_system.h   |  100 ++++++++++++++++++++
 rtt/os/oro_atomic.h           |    2 +
 rtt/os/oro_system.h           |    2 +
 rtt/rtt-config.h.in           |    2 +
 7 files changed, 320 insertions(+), 0 deletions(-)
 create mode 100644 rtt/os/oro_arm/CMakeLists.txt
 create mode 100644 rtt/os/oro_arm/oro_atomic.h
 create mode 100644 rtt/os/oro_arm/oro_system.h

diff --git a/rtt/os/CMakeLists.txt b/rtt/os/CMakeLists.txt
index 57cadad..f15a17d 100644
--- a/rtt/os/CMakeLists.txt
+++ b/rtt/os/CMakeLists.txt
@@ -16,6 +16,7 @@
 ADD_SUBDIRECTORY( win32 )
 ADD_SUBDIRECTORY( oro_i386 )
 ADD_SUBDIRECTORY( oro_x86_64 )
 ADD_SUBDIRECTORY( oro_powerpc )
+ADD_SUBDIRECTORY( oro_arm )
 ADD_SUBDIRECTORY( oro_msvc )
 ADD_SUBDIRECTORY( oro_noasm )
diff --git a/rtt/os/oro_arm/CMakeLists.txt b/rtt/os/oro_arm/CMakeLists.txt
new file mode 100644
index 0000000..7e1e0cf
--- /dev/null
+++ b/rtt/os/oro_arm/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE( GLOB CPPS [^.]*.cpp [^.]*.c )
+FILE( GLOB HPPS [^.]*.hpp [^.]*.h [^.]*.inl )
+
+GLOBAL_ADD_INCLUDE( rtt/os/oro_arm ${HPPS} )
+GLOBAL_ADD_SRC( ${CPPS} )
+
diff --git a/rtt/os/oro_arm/oro_atomic.h b/rtt/os/oro_arm/oro_atomic.h
new file mode 100644
index 0000000..d4128b2
--- /dev/null
+++ b/rtt/os/oro_arm/oro_atomic.h
@@ -0,0 +1,207 @@
+#ifndef __ARCH_arm_ORO_ATOMIC__
+#define __ARCH_arm_ORO_ATOMIC__
+
+/*
+ * Philippe Hamelin (philippe.hamelin@gmail.com)
+ * Expertise robotique et civil
+ * Institut de recherche d'Hydro-Quebec
+ * Quebec, Canada
+ *
+ * These atomic operations have been mostly copied from the Linux kernel in
+ * arch/arm/include/asm/atomic.h and arch/arm/include/asm/system.h. There are
+ * many variants depending on the instruction set version. This ARM port only
+ * supports ARMv6 and ARMv7 CPUs, which include the Cortex-A8 and Cortex-A9.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+// Detect instruction set version
+#if defined(__ARM_ARCH_6__) || \
+    defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7__)
+# define USE_ARMV6_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7__)
+# define USE_ARMV7_INSTRUCTIONS 1
+#endif
+
+// The barrier for SMP is different on ARMv6 and ARMv7.
+#if defined(USE_ARMV7_INSTRUCTIONS)
+# define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#elif defined(USE_ARMV6_INSTRUCTIONS)
+# define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+                                     : : "r" (0) : "memory")
+#else
+# error This ARM architecture is not supported.
+#endif
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+// CONFIG_FORCE_UP optimizes for single-CPU (uniprocessor) systems.
+#ifndef CONFIG_FORCE_UP
+#define smp_mb() dmb()
+#else
+#define smp_mb() barrier()
+#endif
+
+typedef struct { volatile int counter; } oro_atomic_t;
+
+#define ORO_ATOMIC_INIT(i) { (i) }
+#define ORO_ATOMIC_SETUP oro_atomic_set
+#define ORO_ATOMIC_CLEANUP(v)
+
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+#define oro_atomic_read(v) (*(volatile int *)&(v)->counter)
+#define oro_atomic_set(v,i) (((v)->counter) = (i))
+
+/*
+ * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens.
+ */
+static inline void oro_atomic_add(int i, oro_atomic_t *v)
+{
+    unsigned long tmp;
+    int result;
+
+    __asm__ __volatile__("@ atomic_add\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+    : "r" (&v->counter), "Ir" (i)
+    : "cc");
+}
+
+static inline int oro_atomic_add_return(int i, oro_atomic_t *v)
+{
+    unsigned long tmp;
+    int result;
+
+    smp_mb();
+
+    __asm__ __volatile__("@ atomic_add_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+    : "r" (&v->counter), "Ir" (i)
+    : "cc");
+
+    smp_mb();
+
+    return result;
+}
+
+static inline void oro_atomic_sub(int i, oro_atomic_t *v)
+{
+    unsigned long tmp;
+    int result;
+
+    __asm__ __volatile__("@ atomic_sub\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+    : "r" (&v->counter), "Ir" (i)
+    : "cc");
+}
+
+static inline int oro_atomic_sub_return(int i, oro_atomic_t *v)
+{
+    unsigned long tmp;
+    int result;
+
+    smp_mb();
+
+    __asm__ __volatile__("@ atomic_sub_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+    : "r" (&v->counter), "Ir" (i)
+    : "cc");
+
+    smp_mb();
+
+    return result;
+}
+
+static inline int oro_atomic_cmpxchg(oro_atomic_t *ptr, int old, int newv)
+{
+    unsigned long oldval, res;
+
+    smp_mb();
+
+    do {
+        __asm__ __volatile__("@ atomic_cmpxchg\n"
+        "ldrex   %1, [%3]\n"
+        "mov     %0, #0\n"
+        "teq     %1, %4\n"
+        "strexeq %0, %5, [%3]\n"
+            : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+            : "r" (&ptr->counter), "Ir" (old), "r" (newv)
+            : "cc");
+    } while (res);
+
+    smp_mb();
+
+    return oldval;
+}
+
+static inline void oro_atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+    unsigned long tmp, tmp2;
+
+    __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:     ldrex   %0, [%3]\n"
+"       bic     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+    : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+    : "r" (addr), "Ir" (mask)
+    : "cc");
+}
+
+#define oro_atomic_xchg(v, newv) (xchg(&((v)->counter), newv))
+
+#define oro_atomic_inc(v) oro_atomic_add(1, v)
+#define oro_atomic_dec(v) oro_atomic_sub(1, v)
+
+#define oro_atomic_inc_and_test(v) (oro_atomic_add_return(1, v) == 0)
+#define oro_atomic_dec_and_test(v) (oro_atomic_sub_return(1, v) == 0)
+#define oro_atomic_inc_return(v) (oro_atomic_add_return(1, v))
+#define oro_atomic_dec_return(v) (oro_atomic_sub_return(1, v))
+#define oro_atomic_sub_and_test(i, v) (oro_atomic_sub_return(i, v) == 0)
+
+#define oro_atomic_add_negative(i,v) (oro_atomic_add_return(i, v) < 0)
+
+#define smp_mb__before_oro_atomic_dec() smp_mb()
+#define smp_mb__after_oro_atomic_dec() smp_mb()
+#define smp_mb__before_oro_atomic_inc() smp_mb()
+#define smp_mb__after_oro_atomic_inc() smp_mb()
+
+#ifdef __cplusplus
+} // end extern "C"
+#endif // __cplusplus
+
+#endif // __ARCH_arm_ORO_ATOMIC__
+
diff --git a/rtt/os/oro_arm/oro_system.h b/rtt/os/oro_arm/oro_system.h
new file mode 100644
index 0000000..634f5f2
--- /dev/null
+++ b/rtt/os/oro_arm/oro_system.h
@@ -0,0 +1,100 @@
+#ifndef __ARCH_arm_ORO_SYSTEM__
+#define __ARCH_arm_ORO_SYSTEM__
+
+/*
+ * Philippe Hamelin (philippe.hamelin@gmail.com)
+ * Expertise robotique et civil
+ * Institut de recherche d'Hydro-Quebec
+ * Quebec, Canada
+ *
+ * These atomic operations have been mostly copied from the Linux kernel in
+ * arch/arm/include/asm/atomic.h and arch/arm/include/asm/system.h. There are
+ * many variants depending on the instruction set version. This ARM port only
+ * supports ARMv6 and ARMv7 CPUs, which include the Cortex-A8 and Cortex-A9.
+ */
+
+#include "oro_atomic.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+static inline unsigned long __oro_cmpxchg(volatile void *ptr, unsigned long old,
+                                          unsigned long newv, int size)
+{
+    unsigned long oldval, res;
+
+    switch (size) {
+#ifdef USE_ARMV7_INSTRUCTIONS
+    case 1:
+        do {
+            asm volatile("@ __cmpxchg1\n"
+            "   ldrexb   %1, [%2]\n"
+            "   mov      %0, #0\n"
+            "   teq      %1, %3\n"
+            "   strexbeq %0, %4, [%2]\n"
+                : "=&r" (res), "=&r" (oldval)
+                : "r" (ptr), "Ir" (old), "r" (newv)
+                : "memory", "cc");
+        } while (res);
+        break;
+    case 2:
+        do {
+            asm volatile("@ __cmpxchg2\n"
+            "   ldrexh   %1, [%2]\n"
+            "   mov      %0, #0\n"
+            "   teq      %1, %3\n"
+            "   strexheq %0, %4, [%2]\n"
+                : "=&r" (res), "=&r" (oldval)
+                : "r" (ptr), "Ir" (old), "r" (newv)
+                : "memory", "cc");
+        } while (res);
+        break;
+#endif /* USE_ARMV7_INSTRUCTIONS */
+    case 4:
+        do {
+            asm volatile("@ __cmpxchg4\n"
+            "   ldrex    %1, [%2]\n"
+            "   mov      %0, #0\n"
+            "   teq      %1, %3\n"
+            "   strexeq  %0, %4, [%2]\n"
+                : "=&r" (res), "=&r" (oldval)
+                : "r" (ptr), "Ir" (old), "r" (newv)
+                : "memory", "cc");
+        } while (res);
+        break;
+    default:
+        oldval = 0;
+    }
+
+    return oldval;
+}
+
+static inline unsigned long __oro_cmpxchg_mb(volatile void *ptr, unsigned long old,
+                                             unsigned long newv, int size)
+{
+    unsigned long ret;
+
+    smp_mb();
+    ret = __oro_cmpxchg(ptr, old, newv, size);
+    smp_mb();
+
+    return ret;
+}
+
+#define oro_cmpxchg(ptr,o,n)                                  \
+    ((__typeof__(*(ptr)))__oro_cmpxchg_mb((ptr),              \
+                                          (unsigned long)(o), \
+                                          (unsigned long)(n), \
+                                          sizeof(*(ptr))))
+
+#ifdef __cplusplus
+} // end extern "C"
+#endif // __cplusplus
+
+#endif // __ARCH_arm_ORO_SYSTEM__
+
diff --git a/rtt/os/oro_atomic.h b/rtt/os/oro_atomic.h
index 625cbda..103ff3e 100644
--- a/rtt/os/oro_atomic.h
+++ b/rtt/os/oro_atomic.h
@@ -51,6 +51,8 @@
 # include "oro_x86_64/oro_atomic.h"
 # elif defined( OROBLD_OS_ARCH_ppc )
 # include "oro_powerpc/oro_atomic.h"
+# elif defined( OROBLD_OS_ARCH_arm )
+# include "oro_arm/oro_atomic.h"
 # endif
 # endif
 #else
diff --git a/rtt/os/oro_system.h b/rtt/os/oro_system.h
index 3b72cef..18a1fd6 100644
--- a/rtt/os/oro_system.h
+++ b/rtt/os/oro_system.h
@@ -50,6 +50,8 @@
 # include "oro_x86_64/oro_system.h"
 # elif defined( OROBLD_OS_ARCH_ppc )
 # include "oro_powerpc/oro_system.h"
+# elif defined( OROBLD_OS_ARCH_arm )
+# include "oro_arm/oro_system.h"
 # endif
 #else
 #define new _new
diff --git a/rtt/rtt-config.h.in b/rtt/rtt-config.h.in
index 2419798..eb4110e 100644
--- a/rtt/rtt-config.h.in
+++ b/rtt/rtt-config.h.in
@@ -21,6 +21,8 @@
 # define OROBLD_OS_ARCH_x86_64
 # elif defined( __GNUC__ ) && (defined( __powerpc__ ) || defined( __PPC__ ) )
 # define OROBLD_OS_ARCH_ppc
+# elif defined( __GNUC__ ) && (defined( __arm__ ) || defined( __ARM__ ) )
+# define OROBLD_OS_ARCH_arm
 # elif defined( __GNUC__ ) && defined( __ia64__ )
 # error "ia64 Is not yet supported, contact the orocos-dev mailinglist for further actions."
 # define OROBLD_OS_ARCH_ia64
-- 
1.7.0.4
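
For reference, the sketch below shows how the new primitives might be exercised
from C. It is illustrative only and not part of the patch: it assumes an
ARMv6/ARMv7 GCC toolchain with rtt/os/oro_arm on the include path, and the
shared_block_t reference-counting type and its helper functions are invented
for this example.

/* Illustrative sketch only: assumes an ARMv6/ARMv7 GCC toolchain with
 * rtt/os/oro_arm on the include path; shared_block_t and its helpers are
 * made up for this example and are not part of the patch. */
#include <stdio.h>
#include "oro_atomic.h"
#include "oro_system.h"

typedef struct {
    oro_atomic_t refcount;            /* updated with ldrex/strex loops */
} shared_block_t;

static void block_init(shared_block_t *b)
{
    ORO_ATOMIC_SETUP(&b->refcount, 1);   /* expands to oro_atomic_set() */
}

static void block_acquire(shared_block_t *b)
{
    oro_atomic_inc(&b->refcount);        /* atomic add of 1 */
}

static int block_release(shared_block_t *b)
{
    /* Non-zero when the last reference is dropped; the sub_return path
     * already brackets the update with smp_mb(). */
    return oro_atomic_dec_and_test(&b->refcount);
}

int main(void)
{
    shared_block_t b;
    int flag = 0;
    int prev;

    block_init(&b);
    block_acquire(&b);                   /* count: 1 -> 2 */

    /* oro_cmpxchg() also works on a plain int (4-byte operand). */
    prev = oro_cmpxchg(&flag, 0, 1);
    printf("cmpxchg returned %d, flag is now %d\n", prev, flag);

    block_release(&b);                   /* count: 2 -> 1 */
    if (block_release(&b))               /* count: 1 -> 0 */
        printf("last reference dropped (count = %d)\n",
               oro_atomic_read(&b.refcount));
    return 0;
}

The same oro_atomic_t counter and oro_cmpxchg() primitive are what the generic
rtt/os/oro_atomic.h and rtt/os/oro_system.h headers pick up once
OROBLD_OS_ARCH_arm is defined through rtt-config.h.in.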