????JFIF??x?x????'
Server IP : 79.136.114.73 / Your IP : 3.16.215.60 Web Server : Apache/2.4.7 (Ubuntu) PHP/5.5.9-1ubuntu4.29 OpenSSL/1.0.1f System : Linux b8009 3.13.0-170-generic #220-Ubuntu SMP Thu May 9 12:40:49 UTC 2019 x86_64 User : www-data ( 33) PHP Version : 5.5.9-1ubuntu4.29 Disable Function : pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority, MySQL : ON | cURL : ON | WGET : ON | Perl : ON | Python : ON | Sudo : ON | Pkexec : ON Directory : /usr/src/linux-headers-3.13.0-49/arch/sh/include/asm/ |
Upload File : |
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A LL/SC-based atomic operations, built on the movli.l (load-linked)
 * / movco.l (store-conditional) instruction pair.  Each op loops: load the
 * counter linked, modify it, attempt the conditional store, and retry
 * ("bf 1b") if another CPU invalidated the reservation.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

/*
 * Atomically add @i to @v->counter.  No return value, no memory barrier.
 * "=&z" pins the temporary to r0 (earlyclobber); the store-conditional
 * result lands in the T bit, hence the "t" clobber.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_add \n"
" add %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/* Atomically subtract @i from @v->counter; same LL/SC retry loop as above. */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_sub \n"
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */

/*
 * Atomically add @i to @v->counter and return the new value.
 * The trailing "synco" presumably provides the full-barrier semantics
 * the atomic_*_return() API requires on SH-4A -- NOTE(review): confirm
 * against the SH-4A manual.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_add_return \n"
" add %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

/*
 * Atomically subtract @i from @v->counter and return the new value.
 * Mirrors atomic_add_return(), including the trailing "synco".
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_sub_return \n"
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

/*
 * Atomically clear the bits in @mask from @v->counter.  The mask is
 * inverted at the interface (~mask passed into the asm) so the loop
 * body is a plain AND.
 */
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_clear_mask \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

/* Atomically set the bits in @mask in @v->counter (OR into the counter). */
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_set_mask \n"
" or %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */