1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef LINUX_MSI_H
3 #define LINUX_MSI_H
4 
5 #include <linux/kobject.h>
6 #include <linux/list.h>
7 
/**
 * struct msi_msg - Representation of a composed MSI message
 * @address_lo:	Low 32 bits of the message address
 * @address_hi:	High 32 bits of the message address
 * @data:	Message data (MSI uses the low 16 bits)
 */
struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};
13 
14 extern int pci_msi_ignore_mask;
15 /* Helper functions */
16 struct irq_data;
17 struct msi_desc;
18 struct pci_dev;
19 struct platform_msi_priv_data;
20 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
21 #ifdef CONFIG_GENERIC_MSI_IRQ
22 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
23 #else
/* Stub for !CONFIG_GENERIC_MSI_IRQ: there is no cached message to fetch. */
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
27 #endif
28 
29 typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
30 				    struct msi_msg *msg);
31 
/**
 * struct platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};
41 
/**
 * struct fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};
49 
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct cpumask			*affinity;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				__u8	is_msix		: 1;
				__u8	multiple	: 3;
				__u8	multi_cap	: 3;
				__u8	maskbit		: 1;
				__u8	is_64		: 1;
				__u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			/* Which member is valid depends on msi_attrib.is_msix */
			union {
				u8	mask_pos;	/* [PCI MSI]   */
				void __iomem *mask_base; /* [PCI MSI-X] */
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper namespaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
	};
};
111 
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
/* First descriptor on @dev's MSI list; the list must not be empty. */
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
/* Iterate over all MSI descriptors attached to @dev. */
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
/* As for_each_msi_entry(), but safe against removal of the current entry. */
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
/*
 * Iterate over every Linux interrupt number of every active (irq != 0)
 * descriptor of @dev, covering all nvec_used vectors per descriptor.
 */
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)
127 
128 #ifdef CONFIG_PCI_MSI
129 #define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
130 #define for_each_pci_msi_entry(desc, pdev)	\
131 	for_each_msi_entry((desc), &(pdev)->dev)
132 
133 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
134 void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
135 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
136 #else /* CONFIG_PCI_MSI */
/* Stub for !CONFIG_PCI_MSI: no PCI sysdata is available. */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
/* Stub for !CONFIG_PCI_MSI: writing an MSI message is a no-op. */
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
144 #endif /* CONFIG_PCI_MSI */
145 
146 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
147 				 const struct cpumask *affinity);
148 void free_msi_entry(struct msi_desc *entry);
149 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
150 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
151 
152 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
153 void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
154 void pci_msi_mask_irq(struct irq_data *data);
155 void pci_msi_unmask_irq(struct irq_data *data);
156 
157 /* Conversion helpers. Should be removed after merging */
/* Legacy alias for __pci_write_msi_msg(); kept until callers are converted. */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
/* Legacy alias for pci_write_msi_msg(); kept until callers are converted. */
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
/* Legacy alias for pci_msi_mask_irq(); kept until callers are converted. */
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
/* Legacy alias for pci_msi_unmask_irq(); kept until callers are converted. */
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}
174 
/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
180 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
181 void arch_teardown_msi_irq(unsigned int irq);
182 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
183 void arch_teardown_msi_irqs(struct pci_dev *dev);
184 void arch_restore_msi_irqs(struct pci_dev *dev);
185 
186 void default_teardown_msi_irqs(struct pci_dev *dev);
187 void default_restore_msi_irqs(struct pci_dev *dev);
188 
/**
 * struct msi_controller - PCI MSI controller abstraction
 * @owner:	Module providing this controller
 * @dev:	Device which provides the controller
 * @of_node:	Associated device tree node
 * @list:	List head for registration of controllers
 * @setup_irq:	Callback to set up a single MSI interrupt for a PCI device
 * @setup_irqs:	Callback to set up @nvec interrupts of @type for a PCI device
 * @teardown_irq: Callback to tear down one previously set up interrupt
 */
struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
201 
202 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
203 
204 #include <linux/irqhandler.h>
205 #include <asm/msi.h>
206 
207 struct irq_domain;
208 struct irq_domain_ops;
209 struct irq_chip;
210 struct device_node;
211 struct fwnode_handle;
212 struct msi_domain_info;
213 
/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};
254 
/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};
276 
277 /* Flags for msi_domain_info */
/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
303 
304 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
305 			    bool force);
306 
307 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
308 					 struct msi_domain_info *info,
309 					 struct irq_domain *parent);
310 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
311 			  int nvec);
312 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
313 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
314 
315 struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
316 						  struct msi_domain_info *info,
317 						  struct irq_domain *parent);
318 int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
319 				   irq_write_msi_msg_t write_msi_msg);
320 void platform_msi_domain_free_irqs(struct device *dev);
321 
322 /* When an MSI domain is used as an intermediate domain */
323 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
324 			    int nvec, msi_alloc_info_t *args);
325 int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
326 			     int virq, int nvec, msi_alloc_info_t *args);
327 struct irq_domain *
328 platform_msi_create_device_domain(struct device *dev,
329 				  unsigned int nvec,
330 				  irq_write_msi_msg_t write_msi_msg,
331 				  const struct irq_domain_ops *ops,
332 				  void *host_data);
333 int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
334 			      unsigned int nr_irqs);
335 void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
336 			      unsigned int nvec);
337 void *platform_msi_get_host_data(struct irq_domain *domain);
338 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
339 
340 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
341 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
342 struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
343 					     struct msi_domain_info *info,
344 					     struct irq_domain *parent);
345 irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
346 					  struct msi_desc *desc);
347 int pci_msi_domain_check_cap(struct irq_domain *domain,
348 			     struct msi_domain_info *info, struct device *dev);
349 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
350 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
351 #else
/* Stub for !CONFIG_PCI_MSI_IRQ_DOMAIN: no per-device MSI domain exists. */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
356 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
357 
358 #endif /* LINUX_MSI_H */
359