4 #include <linux/kernel.h>
5 #include <linux/compiler.h>
6 #include <linux/types.h>
8 #include <asm/page.h> /* IO address mapping routines need this */
10 #include <asm-generic/pci_iomap.h>
12 /* BIO layer definitions. */
/* Physical base address and size of the kernel image; defined in arch
 * boot/setup code elsewhere, only declared here. */
13 extern unsigned long kern_base, kern_size;
/* NOTE(review): this extraction is missing lines from the upstream file
 * (function braces, the "=r"(ret) output-operand lines, return statements,
 * and the close of the comment below). Restore from upstream
 * arch/sparc/include/asm/io_64.h before compiling. */
15 /* __raw_{read,write}{b,w,l,q} uses direct access.
16 * Access the memory as big endian bypassing the cache
17 * by using ASI_PHYS_BYPASS_EC_E
/* 8-bit raw read: lduba (load unsigned byte from alternate space) with the
 * physical-bypass, E-cacheable ASI — uncached, big-endian access. */
19 #define __raw_readb __raw_readb
20 static inline u8 __raw_readb(const volatile void __iomem *addr)
24 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
26 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 16-bit raw read via lduha (load unsigned halfword, alternate space). */
31 #define __raw_readw __raw_readw
32 static inline u16 __raw_readw(const volatile void __iomem *addr)
36 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
38 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 32-bit raw read via lduwa (load unsigned word, alternate space). */
43 #define __raw_readl __raw_readl
44 static inline u32 __raw_readl(const volatile void __iomem *addr)
48 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
50 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 64-bit raw read via ldxa (load extended word, alternate space). */
55 #define __raw_readq __raw_readq
56 static inline u64 __raw_readq(const volatile void __iomem *addr)
60 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
62 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* NOTE(review): function braces are missing from this extraction; restore
 * from upstream before compiling.
 * Raw stores, big-endian, cache-bypassing (ASI_PHYS_BYPASS_EC_E).
 * The "Jr" constraint lets the compiler use %g0 directly when the value
 * being stored is the constant zero (%r0 in the template prints as %g0). */
67 #define __raw_writeb __raw_writeb
68 static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
70 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
72 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 16-bit raw store via stha (store halfword, alternate space). */
75 #define __raw_writew __raw_writew
76 static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
78 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
80 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 32-bit raw store via stwa (store word, alternate space). */
83 #define __raw_writel __raw_writel
84 static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
86 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
88 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* 64-bit raw store via stxa (store extended word, alternate space). */
91 #define __raw_writeq __raw_writeq
92 static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
94 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
96 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
/* NOTE(review): braces, output operands, clobber lines, return statements
 * and the close of the comment below were dropped by extraction. The trailing
 * `)` without `;` on each asm suggests a continuation line (likely a "memory"
 * clobber) follows upstream — confirm against the original file. */
99 /* Memory functions, same as I/O accesses on Ultra.
100 * Access memory as little endian bypassing
101 * the cache by using ASI_PHYS_BYPASS_EC_E_L
/* 8-bit read, little-endian ASI (the _L variant byte-swaps on access). */
104 static inline u8 readb(const volatile void __iomem *addr)
107 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
109 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 16-bit little-endian read. */
115 static inline u16 readw(const volatile void __iomem *addr)
118 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
120 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 32-bit little-endian read. */
127 static inline u32 readl(const volatile void __iomem *addr)
130 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
132 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 64-bit little-endian read. */
139 static inline u64 readq(const volatile void __iomem *addr)
142 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
144 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* NOTE(review): function braces and asm continuation lines are missing from
 * this extraction — restore from upstream before compiling.
 * PCI-style stores: little-endian, cache-bypassing (ASI_PHYS_BYPASS_EC_E_L).
 * "Jr" lets a constant-zero value be emitted as %g0. */
150 #define writeb writeb
151 static inline void writeb(u8 b, volatile void __iomem *addr)
153 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
155 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 16-bit little-endian store. */
159 #define writew writew
160 static inline void writew(u16 w, volatile void __iomem *addr)
162 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
164 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 32-bit little-endian store. */
168 #define writel writel
169 static inline void writel(u32 l, volatile void __iomem *addr)
171 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
173 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* 64-bit little-endian store. */
177 #define writeq writeq
178 static inline void writeq(u64 q, volatile void __iomem *addr)
180 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
182 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
/* Port I/O: sparc64 has no separate I/O address space, so in*/out* simply
 * forward to the memory-mapped little-endian accessors above, treating the
 * "port" number as a physical address cookie.
 * NOTE(review): braces/return framing elided by extraction. */
188 static inline u8 inb(unsigned long addr)
190 return readb((volatile void __iomem *)addr);
194 static inline u16 inw(unsigned long addr)
196 return readw((volatile void __iomem *)addr);
200 static inline u32 inl(unsigned long addr)
202 return readl((volatile void __iomem *)addr);
206 static inline void outb(u8 b, unsigned long addr)
208 writeb(b, (volatile void __iomem *)addr);
212 static inline void outw(u16 w, unsigned long addr)
214 writew(w, (volatile void __iomem *)addr);
218 static inline void outl(u32 l, unsigned long addr)
220 writel(l, (volatile void __iomem *)addr);
/* "Pausing" port I/O variants: no extra delay is needed on this platform,
 * so they alias the plain accessors. */
224 #define inb_p(__addr) inb(__addr)
225 #define outb_p(__b, __addr) outb(__b, __addr)
226 #define inw_p(__addr) inw(__addr)
227 #define outw_p(__w, __addr) outw(__w, __addr)
228 #define inl_p(__addr) inl(__addr)
229 #define outl_p(__l, __addr) outl(__l, __addr)
/* Repeated ("string") port I/O: copy a sequence of items between a memory
 * buffer and one port address. Implemented out of line elsewhere in the
 * arch; args are (port, buffer, count). */
231 void outsb(unsigned long, const void *, unsigned long);
232 void outsw(unsigned long, const void *, unsigned long);
233 void outsl(unsigned long, const void *, unsigned long);
234 void insb(unsigned long, void *, unsigned long);
235 void insw(unsigned long, void *, unsigned long);
236 void insl(unsigned long, void *, unsigned long);
/* iomap repeated-transfer helpers: strip the __iomem cookie back to the
 * integer "port" value (__force silences sparse) and defer to the string
 * I/O routines above. NOTE(review): braces elided by extraction. */
238 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
240 insb((unsigned long __force)port, buf, count);
242 static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
244 insw((unsigned long __force)port, buf, count);
247 static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
249 insl((unsigned long __force)port, buf, count);
252 static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
254 outsb((unsigned long __force)port, buf, count);
257 static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
259 outsw((unsigned long __force)port, buf, count);
262 static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
264 outsl((unsigned long __force)port, buf, count);
/* Relaxed (no extra ordering) variants: the plain accessors already provide
 * the required semantics here, so these are straight aliases. */
267 #define readb_relaxed(__addr) readb(__addr)
268 #define readw_relaxed(__addr) readw(__addr)
269 #define readl_relaxed(__addr) readl(__addr)
270 #define readq_relaxed(__addr) readq(__addr)
/* NOTE(review): the closing `*/` of this comment was lost in extraction. */
272 /* Valid I/O Space regions are anywhere, because each PCI bus supported
273 * can live in an arbitrary area of the physical address range.
274 #define IO_SPACE_LIMIT 0xffffffffffffffffUL
/* NOTE(review): comment terminator and function braces elided by extraction. */
277 /* Now, SBUS variants, only difference from PCI is that we do
278 * not use little-endian ASIs.
/* SBUS accessors delegate to the big-endian __raw_* family above. */
280 static inline u8 sbus_readb(const volatile void __iomem *addr)
282 return __raw_readb(addr);
285 static inline u16 sbus_readw(const volatile void __iomem *addr)
287 return __raw_readw(addr);
290 static inline u32 sbus_readl(const volatile void __iomem *addr)
292 return __raw_readl(addr);
295 static inline u64 sbus_readq(const volatile void __iomem *addr)
297 return __raw_readq(addr);
300 static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
302 __raw_writeb(b, addr);
305 static inline void sbus_writew(u16 w, volatile void __iomem *addr)
307 __raw_writew(w, addr);
310 static inline void sbus_writel(u32 l, volatile void __iomem *addr)
312 __raw_writel(l, addr);
315 static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
317 __raw_writeq(q, addr);
/* NOTE(review): the bodies of these bulk-copy/fill helpers are almost
 * entirely missing from this extraction — only signatures, a local cursor
 * declaration, and a per-byte read are visible. Presumably each loops
 * byte-at-a-time using the matching accessor family (sbus_* big-endian,
 * plain read/write little-endian) — confirm against upstream io_64.h. */
320 static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
328 static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
330 volatile void __iomem *d = dst;
338 static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
344 char tmp = sbus_readb(src);
351 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
357 char tmp = readb(src);
363 static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
367 volatile void __iomem *d = dst;
376 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
380 volatile void __iomem *d = dst;
/* NOTE(review): comment terminator and function braces elided by extraction. */
393 /* On sparc64 we have the whole physical IO address space accessible
394 * using physically addressed loads and stores, so this does nothing.
/* ioremap is an identity conversion: the physical offset itself is the
 * __iomem cookie. iounmap correspondingly has nothing to tear down. */
396 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
398 return (void __iomem *)offset;
401 #define ioremap_nocache(X,Y) ioremap((X),(Y))
402 #define ioremap_wc(X,Y) ioremap((X),(Y))
404 static inline void iounmap(volatile void __iomem *addr)
/* iomap accessors: plain ioread/iowrite are little-endian (read*/write*),
 * while the explicit big-endian *be variants use the __raw_* family. */
408 #define ioread8(X) readb(X)
409 #define ioread16(X) readw(X)
410 #define ioread16be(X) __raw_readw(X)
411 #define ioread32(X) readl(X)
412 #define ioread32be(X) __raw_readl(X)
413 #define iowrite8(val,X) writeb(val,X)
414 #define iowrite16(val,X) writew(val,X)
415 #define iowrite16be(val,X) __raw_writew(val,X)
416 #define iowrite32(val,X) writel(val,X)
417 #define iowrite32be(val,X) __raw_writel(val,X)
419 /* Create a virtual mapping cookie for an IO port range */
420 void __iomem *ioport_map(unsigned long port, unsigned int nr);
421 void ioport_unmap(void __iomem *);
423 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
/* NOTE(review): the pci_iomap declaration (upstream line 424) appears to
 * have been dropped by extraction; only the unmap half is visible. */
425 void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* SBUS capability queries. NOTE(review): the return statements and braces
 * are missing from this extraction, so the constant each predicate returns
 * cannot be read here — restore from upstream. */
427 static inline int sbus_can_dma_64bit(void)
431 static inline int sbus_can_burst64(void)
/* Out-of-line; toggles 64-bit SBUS mode for a device. */
436 void sbus_set_sbus64(struct device *, int);
/* NOTE(review): the opening `/*` of each comment below was lost in extraction. */
439 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
442 #define xlate_dev_mem_ptr(p) __va(p)
445 * Convert a virtual cached pointer to an uncached pointer
/* Identity: kernel virtual pointers are usable directly on this platform. */
447 #define xlate_dev_kmem_ptr(p) p
451 #endif /* !(__SPARC64_IO_H) */