/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/io-mapping.txt
 */

34 struct io_mapping {
35 resource_size_t base;
36 unsigned long size;
37 pgprot_t prot;
38 void __iomem *iomem;
39 };

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <asm/iomap.h>
44 /*
45 * For small address space machines, mapping large objects
46 * into the kernel virtual space isn't practical. Where
47 * available, use fixmap support to dynamically map pages
48 * of the object at run time.
49 */
50
51 static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)52 io_mapping_init_wc(struct io_mapping *iomap,
53 resource_size_t base,
54 unsigned long size)
55 {
56 pgprot_t prot;
57
58 if (iomap_create_wc(base, size, &prot))
59 return NULL;
60
61 iomap->base = base;
62 iomap->size = size;
63 iomap->prot = prot;
64 return iomap;
65 }
66
67 static inline void
io_mapping_fini(struct io_mapping * mapping)68 io_mapping_fini(struct io_mapping *mapping)
69 {
70 iomap_free(mapping->base, mapping->size);
71 }
72
73 /* Atomic map/unmap */
74 static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)75 io_mapping_map_atomic_wc(struct io_mapping *mapping,
76 unsigned long offset)
77 {
78 resource_size_t phys_addr;
79 unsigned long pfn;
80
81 BUG_ON(offset >= mapping->size);
82 phys_addr = mapping->base + offset;
83 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
84 return iomap_atomic_prot_pfn(pfn, mapping->prot);
85 }
86
87 static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)88 io_mapping_unmap_atomic(void __iomem *vaddr)
89 {
90 iounmap_atomic(vaddr);
91 }
92
93 static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)94 io_mapping_map_wc(struct io_mapping *mapping,
95 unsigned long offset,
96 unsigned long size)
97 {
98 resource_size_t phys_addr;
99
100 BUG_ON(offset >= mapping->size);
101 phys_addr = mapping->base + offset;
102
103 return ioremap_wc(phys_addr, size);
104 }
105
106 static inline void
io_mapping_unmap(void __iomem * vaddr)107 io_mapping_unmap(void __iomem *vaddr)
108 {
109 iounmap(vaddr);
110 }

#else

#include <linux/uaccess.h>
#include <asm/pgtable.h>

117 /* Create the io_mapping object*/
118 static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)119 io_mapping_init_wc(struct io_mapping *iomap,
120 resource_size_t base,
121 unsigned long size)
122 {
123 iomap->iomem = ioremap_wc(base, size);
124 if (!iomap->iomem)
125 return NULL;
126
127 iomap->base = base;
128 iomap->size = size;
129 #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
130 iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
131 #elif defined(pgprot_writecombine)
132 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
133 #else
134 iomap->prot = pgprot_noncached(PAGE_KERNEL);
135 #endif
136
137 return iomap;
138 }
139
140 static inline void
io_mapping_fini(struct io_mapping * mapping)141 io_mapping_fini(struct io_mapping *mapping)
142 {
143 iounmap(mapping->iomem);
144 }
145
146 /* Non-atomic map/unmap */
147 static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)148 io_mapping_map_wc(struct io_mapping *mapping,
149 unsigned long offset,
150 unsigned long size)
151 {
152 return mapping->iomem + offset;
153 }
154
155 static inline void
io_mapping_unmap(void __iomem * vaddr)156 io_mapping_unmap(void __iomem *vaddr)
157 {
158 }
159
160 /* Atomic map/unmap */
161 static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)162 io_mapping_map_atomic_wc(struct io_mapping *mapping,
163 unsigned long offset)
164 {
165 preempt_disable();
166 pagefault_disable();
167 return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
168 }
169
170 static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)171 io_mapping_unmap_atomic(void __iomem *vaddr)
172 {
173 io_mapping_unmap(vaddr);
174 pagefault_enable();
175 preempt_enable();
176 }

#endif /* HAVE_ATOMIC_IOMAP */

180 static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,unsigned long size)181 io_mapping_create_wc(resource_size_t base,
182 unsigned long size)
183 {
184 struct io_mapping *iomap;
185
186 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
187 if (!iomap)
188 return NULL;
189
190 if (!io_mapping_init_wc(iomap, base, size)) {
191 kfree(iomap);
192 return NULL;
193 }
194
195 return iomap;
196 }

/* Counterpart of io_mapping_create_wc(): tear down and free @iomap. */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}

#endif /* _LINUX_IO_MAPPING_H */