// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>

#include "pcie-cadence.h"

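/*
 * Program outbound region @r of the address translation unit so that CPU
 * accesses to @cpu_addr are forwarded as PCI memory or I/O transactions
 * (per @is_io) targeting @pci_addr, for a window of @size bytes issued on
 * behalf of function @fn.
 *
 * Hypothetical example (region index, addresses and size are made up):
 *	cdns_pcie_set_outbound_region(pcie, 0, 1, false, 0x40000000, 0x0, SZ_1M);
 */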
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64-bit values, so round the size up to a power of two by hand.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] is set in the DESC0 register of the
	 * outbound PCIe descriptor, the PCI function number must always be
	 * programmed into Bits [26:24] of DESC0.
	 *
	 * In Root Complex mode, the function number is always 0, but in
	 * Endpoint mode the PCIe controller may support more than one
	 * function, so the proper function number must be written into the
	 * outbound PCIe descriptor.
	 *
	 * Setting Bit [23] is mandatory in Root Complex mode: the driver must
	 * then provide the bus number in Bits [7:0] of DESC1 and the device
	 * number in Bits [31:27] of DESC0. Like the function number, the
	 * device number is always 0 in Root Complex mode.
	 *
	 * In Endpoint mode, however, Bit [23] of DESC0 can be cleared so that
	 * the PCIe controller uses the captured values for the bus and device
	 * numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		/*
		 * Use captured values for bus and device numbers but still
		 * need to set the function number.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

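/*
 * Program outbound region @r so that accesses to @cpu_addr are sent on the
 * link as Normal Message TLPs for function @fn; no PCI address translation
 * is set up for this region.
 */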
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	cpu_addr -= pcie->mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

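/* Clear every register of outbound region @r. */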
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}

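/* Power off and exit all PHYs attached to the controller, in reverse order. */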
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}

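/*
 * Initialize and power on every PHY attached to the controller. On failure,
 * any PHY already brought up is powered off and exited again before the
 * error code is returned.
 */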
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}

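/*
 * Look up the PHYs listed in the "phy-names" DT property, add device links
 * to them and bring them up. If no PHY is described, return 0 with
 * pcie->phy_count set to 0; on error, undo the links and PHY references
 * acquired so far.
 */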
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names. PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}

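/*
 * System sleep support: power the PHYs off in the noirq suspend phase and
 * bring them back up in the noirq resume phase.
 */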
#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

const struct dev_pm_ops cdns_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				      cdns_pcie_resume_noirq)
};