/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#ifdef LOCORE
#define __ASSEMBLY__
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
#define __XEN_INTERFACE_VERSION__ 0x00030208
#endif

#define GRANT_REF_INVALID 0xffffffff

#include <xen/interface/xen.h>

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, a mechanism
 * was implemented by which the user can annotate likely branch directions
 * and expect the blocks to be reordered appropriately.  Define
 * __builtin_expect to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

/* Normalize the condition to 0/1 before handing it to __builtin_expect. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (likely(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp)							\
do {									\
	if (unlikely(exp)) {						\
		printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__); \
		panic("%s: %s:%d", #exp, __FILE__, __LINE__);		\
	}								\
} while (0)
#endif

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

#define per_cpu(var, cpu)	(pcpu_find((cpu))->pc_ ## var)

/* Crude memory allocator for memory allocation early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);

#include <sys/types.h>

void printk(const char *fmt, ...);

/* Some function prototypes. */
void trap_init(void);

#ifndef XENHVM

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if (unlikely(_vcpu->evtchn_upcall_pending))			\
		force_evtchn_callback();				\
} while (0)

/* Read the current event mask without changing it. */
#define __save_flags(x)							\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask;				\
} while (0)

#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if (unlikely(_vcpu->evtchn_upcall_pending))		\
			force_evtchn_callback();			\
	}								\
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x)					\
	do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x)					\
	do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave	mtx_lock_irqsave
#define spin_unlock_irqrestore	mtx_unlock_irqrestore

#endif /* !XENHVM */
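
/*
 * Usage sketch for the macros above (illustrative only; not part of the
 * original interface).  "flags" is simply a local variable wide enough to
 * hold the saved evtchn_upcall_mask value:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(mask event upcalls, saving state)
 *	... modify data shared with the event-channel handler ...
 *	local_irq_restore(flags);	(if previously unmasked, any pending
 *					 event is delivered via
 *					 force_evtchn_callback())
 */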

#ifndef xen_mb
#define xen_mb() mb()
#endif
#ifndef xen_rmb
#define xen_rmb() rmb()
#endif
#ifndef xen_wmb
#define xen_wmb() wmb()
#endif
#ifdef SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xen_xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.  ADDR expects
 * a parameter named "addr" to be in scope at its point of use.
 */
#define ADDR (*(volatile long *) addr)

typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
				   int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
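
/*
 * Usage sketch for xen_xchg() above (illustrative only; not part of the
 * original interface): the atomic swap returns the old contents, so it
 * can serve as a simple test-and-set flag.
 *
 *	static volatile unsigned int busy;
 *
 *	if (xen_xchg(&busy, 1) == 0) {
 *		... flag acquired; do work ...
 *		xen_xchg(&busy, 0);	(release)
 *	}
 */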

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) &
	    (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr),(addr)) :	\
	 variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
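
/*
 * Usage sketch for the bit operations above (illustrative only; "pending"
 * is a hypothetical bitmap, not a field of this header):
 *
 *	static volatile unsigned long pending[2];	(64-bit bitmap)
 *
 *	set_bit(37, pending);			(atomically mark bit 37)
 *	if (test_and_clear_bit(37, pending)) {
 *		... the bit was set and this CPU consumed it ...
 *	}
 */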

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  On x86 the full 32-bit range of an
 * atomic_t is usable.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/* Read the 64-bit timestamp counter into a 64-bit variable. */
#define rdtscll(val)							\
	__asm__ __volatile__("rdtsc" : "=A" (val))

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a valid
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */