/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"

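/*
 * Release the queue pair resources of an endpoint: unmap the outbound
 * ring buffer and the remote queue pair mapping, and tear down the DMA
 * mappings of the local queue pair and the local buffer.
 */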
void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp->outbound_q.rb_base) {
		scif_iounmap((void *)qp->outbound_q.rb_base,
			     qp->outbound_q.size, ep->remote_dev);
		qp->outbound_q.rb_base = NULL;
	}
	if (qp->remote_qp) {
		scif_iounmap((void *)qp->remote_qp,
			     sizeof(struct scif_qp), ep->remote_dev);
		qp->remote_qp = NULL;
	}
	if (qp->local_qp) {
		scif_unmap_single(qp->local_qp, ep->remote_dev,
				  sizeof(struct scif_qp));
		qp->local_qp = 0x0;
	}
	if (qp->local_buf) {
		scif_unmap_single(qp->local_buf, ep->remote_dev,
				  SCIF_ENDPT_QP_SIZE);
		qp->local_buf = 0;
	}
}

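/*
 * Tear down an endpoint's queue pair: clean up the queue pair mappings
 * under the endpoint lock, then free the inbound ring buffer and the
 * queue pair itself.
 */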
void scif_teardown_ep(void *endpt)
{
	struct scif_endpt *ep = endpt;
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp) {
		spin_lock(&ep->lock);
		scif_cleanup_ep_qp(ep);
		spin_unlock(&ep->lock);
		kfree(qp->inbound_q.rb_base);
		kfree(qp);
	}
}

/*
 * Enqueue the endpoint to the zombie list for cleanup.
 * The endpoint should not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	if (!eplock_held)
		mutex_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		mutex_unlock(&scif_info.eplock);
	schedule_work(&scif_info.misc_work);
}

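/*
 * Walk the global list of listening endpoints and return the endpoint
 * bound to the given local port, or NULL if there is no such listener.
 */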
static struct scif_endpt *scif_find_listen_ep(u16 port)
{
	struct scif_endpt *ep = NULL;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.listen) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->port.port == port) {
			mutex_unlock(&scif_info.eplock);
			return ep;
		}
	}
	mutex_unlock(&scif_info.eplock);
	return NULL;
}

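/*
 * Walk the zombie list and free every endpoint whose RMA resources can
 * be uninitialized, releasing its IOVA domain before freeing it.
 */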
void scif_cleanup_zombie_epd(void)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;

	mutex_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.zombie) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (scif_rma_ep_can_uninit(ep)) {
			list_del(pos);
			scif_info.nr_zombies--;
			put_iova_domain(&ep->rma_info.iovad);
			kfree(ep);
		}
	}
	mutex_unlock(&scif_info.eplock);
}

/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_conreq *conreq;

	conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
	if (!conreq)
		/* Lack of resources so reject the request. */
		goto conreq_sendrej;

	ep = scif_find_listen_ep(msg->dst.port);
	if (!ep)
		/* Send reject due to no listening ports */
		goto conreq_sendrej_free;
	else
		spin_lock(&ep->lock);

	if (ep->backlog <= ep->conreqcnt) {
		/* Send reject due to too many pending requests */
		spin_unlock(&ep->lock);
		goto conreq_sendrej_free;
	}

	conreq->msg = *msg;
	list_add_tail(&conreq->list, &ep->conlist);
	ep->conreqcnt++;
	wake_up_interruptible(&ep->conwq);
	spin_unlock(&ep->lock);
	return;

conreq_sendrej_free:
	kfree(conreq);
conreq_sendrej:
	msg->uop = SCIF_CNCT_REJ;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * An accept() on the remote node has occurred and sent this message
 * to indicate success. Place the end point in the MAPPING state and
 * save the remote node's memory information. Then wake up the connect
 * request so it can finish.
 */
void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->peer.node = msg->src.node;
		ep->peer.port = msg->src.port;
		ep->qp_info.gnt_pld = msg->payload[1];
		ep->remote_ep = msg->payload[2];
		ep->state = SCIFEP_MAPPING;

		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state and wake up the pending
 * accept() call.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}

/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote connection request failed to map the local memory it was
 * sent. Place the end point in the CLOSING state to indicate this and
 * wake up the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_CLOSING;
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote end has rejected the connection request. Set the end
 * point back to the bound state and wake up the pending connect().
 */
void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTING == ep->state) {
		ep->state = SCIFEP_BOUND;
		wake_up(&ep->conwq);
	}
	spin_unlock(&ep->lock);
}

/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point. Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shut down.
 *
 * When all accesses to the remote end have completed, send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response time out. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches.
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			list_del(pos);
			ep = tmpep;
			spin_lock(&ep->lock);
			break;
		}
	}

	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}

	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);

	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote side has indicated it has no more references to local resources.
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	ep->state = SCIFEP_DISCONNECTED;
	spin_unlock(&ep->lock);
	complete(&ep->discon);
}

/**
 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote side has sent data on the connection; wake up any thread
 * waiting to receive on this end point.
 */
void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
}

/**
 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
 * @scifdev: Remote SCIF device
 * @msg: Interrupt message
 *
 * The remote side has received data on the connection; wake up any thread
 * waiting to send on this end point.
 */
void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	spin_lock(&ep->lock);
	if (SCIFEP_CONNECTED == ep->state)
		wake_up_interruptible(&ep->sendwq);
	spin_unlock(&ep->lock);
}