5 /* Amount of relocation etherboot is experiencing */
6 extern unsigned long virt_offset;
/* Don't require identity mapped physical memory,
 * osloader.c is the only valid user at the moment.
 */
11 static inline unsigned long virt_to_phys(volatile const void *virt_addr)
13 return ((unsigned long)virt_addr) + virt_offset;
16 static inline void *phys_to_virt(unsigned long phys_addr)
18 return (void *)(phys_addr - virt_offset);
/* virt_to_bus converts an address inside of etherboot [_start, _end]
 * into a memory access cards can use.  On this platform bus addresses
 * are physical addresses, so this is simply virt_to_phys.
 */
#define virt_to_bus virt_to_phys
/* bus_to_virt reverses virt_to_bus, the address must be output
 * from virt_to_bus to be valid. This function does not work on
 * arbitrary bus addresses.  NOTE(review): the original comment was
 * truncated here in extraction — confirm exact wording upstream.
 */
#define bus_to_virt phys_to_virt
33 /* ioremap converts a random 32bit bus address into something
34 * etherboot can access.
36 static inline void *ioremap(unsigned long bus_addr, unsigned long length __unused)
38 return bus_to_virt(bus_addr);
41 /* iounmap cleans up anything ioremap had to setup */
42 static inline void iounmap(void *virt_addr __unused)
/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 *
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 */
/* Burn a few cycles between back-to-back port accesses for slow ISA
 * hardware: either two short jumps, or a write to port 0x80 (the
 * POST-code port, normally unused after boot).
 *
 * NOTE(review): the #else/#endif lines were missing from this chunk,
 * leaving both #ifdef blocks unterminated; restored per the canonical
 * asm-i386 structure this header derives from.
 */
#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
#endif

#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.  The volatile casts force a real memory
 * access per invocation (the compiler may not cache or elide it).
 */
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))

#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

/* Block copies to/from memory-mapped IO: on x86 MMIO is ordinary
 * memory, so plain memcpy suffices.  (a) = destination, (b) = source,
 * (c) = byte count, as with memcpy itself.
 */
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.  (NOTE(review): middle of this comment was truncated
 * in extraction; wording restored from the asm-i386 original.)
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * if the cpu actually does them out of order.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
/* A locked read-modify-write on the stack top serializes all prior
 * loads and stores; "memory" clobber stops compiler reordering too. */
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
/*
 * Talk about misusing macros..
 *
 * __OUT1(s,x) opens an "extern inline" writer __out<s>(value, port);
 * note it ends with an unclosed '{' — the brace is closed by the
 * expansions inside __OUT() below.
 * __OUT2(s,s1,s2) emits the actual "out" instruction; s1/s2 select
 * operand-size/register prefixes for the asm template.
 * __OUT(s,s1,x) instantiates four variants per size: plain (%dx port),
 * 'c' (constant immediate port), and the two _p pausing forms that
 * append SLOW_DOWN_IO.
 */
#define __OUT1(s,x) \
extern void __out##s(unsigned x value, unsigned short port); \
extern inline void __out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
/* __IN1(s,x) opens an "extern inline" reader __in<s>(port); the '{'
 * it leaves open is closed by the expansions inside __IN() below.
 * NOTE(review): the "#define __IN1(s,x)" header line was missing from
 * this chunk, leaving dangling macro-continuation lines; restored.
 */
#define __IN1(s,x) \
extern unsigned x __in##s(unsigned short port); \
extern inline unsigned x __in##s(unsigned short port) { unsigned x _v;

/* __IN2(s,s1,s2) emits the actual "in" instruction. */
#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

/* __IN(s,s1,x,i...) instantiates four reader variants per size:
 * plain (%dx port), 'c' (constant immediate port), and the two _p
 * pausing forms; extra constraints may be passed via i... */
#define __IN(s,s1,x,i...) \
__IN1(s,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
__IN1(s##c,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
__IN1(s##_p,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
__IN1(s##c_p,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
/* __INS(s): define ins<s>(port, addr, count) — read `count` elements
 * of size <s> from `port` into the buffer at addr ("rep ins").
 * NOTE(review): the "#define __INS(s)"/"#define __OUTS(s)" header
 * lines were missing from this chunk; restored.
 */
#define __INS(s) \
extern void ins##s(unsigned short port, void * addr, unsigned long count); \
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

/* __OUTS(s): define outs<s>(port, addr, count) — write `count`
 * elements of size <s> from addr to `port` ("rep outs"). */
#define __OUTS(s) \
extern void outs##s(unsigned short port, const void * addr, unsigned long count); \
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
/*
 * Note that due to the way __builtin_constant_p() works, you
 *  - can't use it inside a inline function (it will never be true)
 *  - you don't have to worry about side effects within the __builtin..
 */

/* 8-bit port I/O.  When the port number is a compile-time constant
 * below 256 the immediate-operand forms (__outbc/__inbc) are used;
 * otherwise the %dx-register forms.  The _p variants pause with
 * SLOW_DOWN_IO after the access.
 * NOTE(review): the inb()/inb_p() macros were truncated in this
 * chunk; reconstructed to mirror the complete outb()/outb_p() forms.
 */
#define outb(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outbc((val),(port)) : \
__outb((val),(port)))

#define inb(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inbc(port) : \
__inb(port))

#define outb_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outbc_p((val),(port)) : \
__outb_p((val),(port)))

#define inb_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inbc_p(port) : \
__inb_p(port))
/* 16-bit port I/O; constant-port dispatch identical to the 8-bit
 * macros above.  NOTE(review): inw()/inw_p() were truncated in this
 * chunk; reconstructed to mirror the complete outw()/outw_p() forms.
 */
#define outw(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outwc((val),(port)) : \
__outw((val),(port)))

#define inw(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inwc(port) : \
__inw(port))

#define outw_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outwc_p((val),(port)) : \
__outw_p((val),(port)))

#define inw_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inwc_p(port) : \
__inw_p(port))
/* 32-bit port I/O; constant-port dispatch identical to the 8-bit
 * macros above.  NOTE(review): inl()/inl_p() were truncated in this
 * chunk; reconstructed to mirror the complete outl()/outl_p() forms.
 */
#define outl(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outlc((val),(port)) : \
__outl((val),(port)))

#define inl(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inlc(port) : \
__inl(port))

#define outl_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__outlc_p((val),(port)) : \
__outl_p((val),(port)))

#define inl_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
__inlc_p(port) : \
__inl_p(port))
246 #endif /* ETHERBOOT_IO_H */