/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_peer_bus.h"

#include "scif_main.h"
#include "scif_map.h"

/**
 * scif_invalidate_ep() - Set state for all connected endpoints
 * to disconnected and wake up all send/recv waitqueues
 * @node: Remote node whose endpoints are invalidated
 */
static void scif_invalidate_ep(int node)
{
	struct scif_endpt *ep;
	struct list_head *pos, *tmpq;

	flush_work(&scif_info.conn_work);
	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			scif_unmap_all_windows(ep);
			spin_lock(&ep->lock);
			scif_cleanup_ep_qp(ep);
			spin_unlock(&ep->lock);
		}
	}
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			list_del(pos);
			spin_lock(&ep->lock);
			ep->state = SCIFEP_DISCONNECTED;
			list_add_tail(&ep->list, &scif_info.disconnected);
			scif_cleanup_ep_qp(ep);
			wake_up_interruptible(&ep->sendwq);
			wake_up_interruptible(&ep->recvwq);
			spin_unlock(&ep->lock);
			scif_unmap_all_windows(ep);
		}
	}
	mutex_unlock(&scif_info.connlock);
}

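/**
 * scif_free_qp() - Free the local queue pair for a SCIF device
 * @scifdev: SCIF device
 *
 * Unmap the DMA mappings and free the memory backing the inbound queue
 * ring buffer and the local queue pair structure, if they were allocated.
 */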
void scif_free_qp(struct scif_dev *scifdev)
{
	struct scif_qp *qp = scifdev->qpairs;

	if (!qp)
		return;
	scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size);
	kfree(qp->inbound_q.rb_base);
	scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
	kfree(scifdev->qpairs);
	scifdev->qpairs = NULL;
}

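/**
 * scif_cleanup_qp() - Clean up the queue pair to a remote node
 * @dev: Remote SCIF device
 *
 * Unmap the remote queue pair and the outbound queue ring buffer, reset
 * the inbound queue state and, for P2P nodes, free the local queue pair
 * as well.
 */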
static void scif_cleanup_qp(struct scif_dev *dev)
{
	struct scif_qp *qp = &dev->qpairs[0];

	if (!qp)
		return;
	scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
	scif_iounmap((void *)qp->outbound_q.rb_base,
		     sizeof(struct scif_qp), dev);
	qp->remote_qp = NULL;
	qp->local_write = 0;
	qp->inbound_q.current_write_offset = 0;
	qp->inbound_q.current_read_offset = 0;
	if (scifdev_is_p2p(dev))
		scif_free_qp(dev);
}

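/**
 * scif_send_acks() - Send pending acknowledgment messages
 * @dev: Remote SCIF device
 *
 * Send a SCIF_NODE_REMOVE_ACK to the mgmt node and/or a SCIF_EXIT_ACK
 * to the remote node if either acknowledgment is still pending for @dev.
 */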
void scif_send_acks(struct scif_dev *dev)
{
	struct scifmsg msg;

	if (dev->node_remove_ack_pending) {
		msg.uop = SCIF_NODE_REMOVE_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = SCIF_MGMT_NODE;
		msg.payload[0] = dev->node;
		scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
		dev->node_remove_ack_pending = false;
	}
	if (dev->exit_ack_pending) {
		msg.uop = SCIF_EXIT_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = dev->node;
		scif_nodeqp_send(dev, &msg);
		dev->exit_ack_pending = false;
	}
}

/**
 * scif_cleanup_scifdev() - Uninitialize SCIF data structures for a remote
 * SCIF device
 * @dev: Remote SCIF device
 */
void scif_cleanup_scifdev(struct scif_dev *dev)
{
	struct scif_hw_dev *sdev = dev->sdev;

	if (!dev->sdev)
		return;
	if (scifdev_is_p2p(dev)) {
		if (dev->cookie) {
			sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
			dev->cookie = NULL;
		}
		scif_destroy_intr_wq(dev);
	}
	flush_work(&scif_info.misc_work);
	scif_destroy_p2p(dev);
	scif_invalidate_ep(dev->node);
	scif_zap_mmaps(dev->node);
	scif_cleanup_rma_for_zombies(dev->node);
	flush_work(&scif_info.misc_work);
	scif_send_acks(dev);
	if (!dev->node && scif_info.card_initiated_exit) {
		/*
		 * Send an SCIF_EXIT message which is the last message from MIC
		 * to the Host and wait for a SCIF_EXIT_ACK
		 */
		scif_send_exit(dev);
		scif_info.card_initiated_exit = false;
	}
	scif_cleanup_qp(dev);
}

/**
 * scif_handle_remove_node() - Handle removal of a node from the SCIF network
 * @node: Node to remove
 */
void scif_handle_remove_node(int node)
{
	struct scif_dev *scifdev = &scif_dev[node];

	if (scif_peer_unregister_device(scifdev))
		scif_send_acks(scifdev);
}

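/**
 * scif_send_rmnode_msg() - Send a SCIF_NODE_REMOVE message
 * @node: Node to send the message to
 * @remove_node: Node being removed from the SCIF network
 *
 * Return: the result of scif_nodeqp_send().
 */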
static int scif_send_rmnode_msg(int node, int remove_node)
{
	struct scifmsg notif_msg;
	struct scif_dev *dev = &scif_dev[node];

	notif_msg.uop = SCIF_NODE_REMOVE;
	notif_msg.src.node = scif_info.nodeid;
	notif_msg.dst.node = node;
	notif_msg.payload[0] = remove_node;
	return scif_nodeqp_send(dev, &notif_msg);
}

/**
 * scif_disconnect_node() - Disconnect a node from the SCIF network
 * @node_id: Id of the node to disconnect
 * @mgmt_initiated: Disconnection initiated from the mgmt node
 */
void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
{
	int ret;
	int msg_cnt = 0;
	u32 i = 0;
	struct scif_dev *scifdev = &scif_dev[node_id];

	if (!node_id)
		return;

	atomic_set(&scifdev->disconn_rescnt, 0);

	/* Destroy p2p network */
	for (i = 1; i <= scif_info.maxid; i++) {
		if (i == node_id)
			continue;
		ret = scif_send_rmnode_msg(i, node_id);
		if (!ret)
			msg_cnt++;
	}
	/* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
	ret = wait_event_timeout(scifdev->disconn_wq,
				 (atomic_read(&scifdev->disconn_rescnt)
				 == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
	/* Tell the card to clean up */
	if (mgmt_initiated && _scifdev_alive(scifdev))
		/*
		 * Send an SCIF_EXIT message which is the last message from Host
		 * to the MIC and wait for a SCIF_EXIT_ACK
		 */
		scif_send_exit(scifdev);
	atomic_set(&scifdev->disconn_rescnt, 0);
	/* Tell the mgmt node to clean up */
	ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
	if (!ret)
		/* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
		wait_event_timeout(scifdev->disconn_wq,
				   (atomic_read(&scifdev->disconn_rescnt) == 1),
				   SCIF_NODE_ALIVE_TIMEOUT);
}

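/**
 * scif_get_node_info() - Obtain node information from the mgmt node
 *
 * Send a SCIF_GET_NODE_INFO message to the mgmt node and wait for the
 * corresponding response to complete.
 */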
void scif_get_node_info(void)
{
	struct scifmsg msg;
	DECLARE_COMPLETION_ONSTACK(node_info);

	msg.uop = SCIF_GET_NODE_INFO;
	msg.src.node = scif_info.nodeid;
	msg.dst.node = SCIF_MGMT_NODE;
	msg.payload[3] = (u64)&node_info;

	if ((scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg)))
		return;

	/* Wait for a response with SCIF_GET_NODE_INFO */
	wait_for_completion(&node_info);
}