Lines Matching refs:ep
53 struct scif_endpt *ep; in scif_open() local
57 ep = kzalloc(sizeof(*ep), GFP_KERNEL); in scif_open()
58 if (!ep) in scif_open()
61 ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL); in scif_open()
62 if (!ep->qp_info.qp) in scif_open()
65 err = scif_anon_inode_getfile(ep); in scif_open()
69 spin_lock_init(&ep->lock); in scif_open()
70 mutex_init(&ep->sendlock); in scif_open()
71 mutex_init(&ep->recvlock); in scif_open()
73 scif_rma_ep_init(ep); in scif_open()
74 ep->state = SCIFEP_UNBOUND; in scif_open()
76 "SCIFAPI open: ep %p success\n", ep); in scif_open()
77 return ep; in scif_open()
80 kfree(ep->qp_info.qp); in scif_open()
82 kfree(ep); in scif_open()
92 static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep) in scif_disconnect_ep() argument
108 wake_up_interruptible(&ep->sendwq); in scif_disconnect_ep()
109 wake_up_interruptible(&ep->recvwq); in scif_disconnect_ep()
110 mutex_lock(&ep->sendlock); in scif_disconnect_ep()
111 mutex_unlock(&ep->sendlock); in scif_disconnect_ep()
112 mutex_lock(&ep->recvlock); in scif_disconnect_ep()
113 mutex_unlock(&ep->recvlock); in scif_disconnect_ep()
119 if (tmpep == ep) { in scif_disconnect_ep()
122 spin_lock(&ep->lock); in scif_disconnect_ep()
136 if (tmpep == ep) { in scif_disconnect_ep()
145 init_completion(&ep->discon); in scif_disconnect_ep()
147 msg.src = ep->port; in scif_disconnect_ep()
148 msg.dst = ep->peer; in scif_disconnect_ep()
149 msg.payload[0] = (u64)ep; in scif_disconnect_ep()
150 msg.payload[1] = ep->remote_ep; in scif_disconnect_ep()
152 err = scif_nodeqp_send(ep->remote_dev, &msg); in scif_disconnect_ep()
153 spin_unlock(&ep->lock); in scif_disconnect_ep()
158 wait_for_completion_timeout(&ep->discon, in scif_disconnect_ep()
160 return ep; in scif_disconnect_ep()
165 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_close() local
172 ep, scif_ep_states[ep->state]); in scif_close()
174 spin_lock(&ep->lock); in scif_close()
175 flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS); in scif_close()
176 spin_unlock(&ep->lock); in scif_close()
181 spin_lock(&ep->lock); in scif_close()
182 oldstate = ep->state; in scif_close()
184 ep->state = SCIFEP_CLOSING; in scif_close()
192 spin_unlock(&ep->lock); in scif_close()
198 if (tmpep == ep) { in scif_close()
208 spin_unlock(&ep->lock); in scif_close()
214 spin_unlock(&ep->lock); in scif_close()
216 scif_disconnect_ep(ep); in scif_close()
226 spin_unlock(&ep->lock); in scif_close()
232 if (tmpep == ep) in scif_close()
236 while (ep->acceptcnt) { in scif_close()
237 aep = list_first_entry(&ep->li_accept, in scif_close()
271 ep->acceptcnt--; in scif_close()
274 spin_lock(&ep->lock); in scif_close()
278 while (ep->conreqcnt) { in scif_close()
279 conreq = list_first_entry(&ep->conlist, in scif_close()
295 ep->conreqcnt--; in scif_close()
299 spin_unlock(&ep->lock); in scif_close()
301 wake_up_interruptible(&ep->conwq); in scif_close()
305 scif_put_port(ep->port.port); in scif_close()
306 scif_anon_inode_fput(ep); in scif_close()
307 scif_teardown_ep(ep); in scif_close()
308 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD); in scif_close()
320 struct scif_endpt *ep = (struct scif_endpt *)epd; in __scif_flush() local
322 switch (ep->state) { in __scif_flush()
325 ep->state = SCIFEP_CLLISTEN; in __scif_flush()
328 wake_up_interruptible(&ep->conwq); in __scif_flush()
339 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_bind() local
345 ep, scif_ep_states[ep->state], pn); in scif_bind()
358 spin_lock(&ep->lock); in scif_bind()
359 if (ep->state == SCIFEP_BOUND) { in scif_bind()
362 } else if (ep->state != SCIFEP_UNBOUND) { in scif_bind()
380 ep->state = SCIFEP_BOUND; in scif_bind()
381 ep->port.node = scif_info.nodeid; in scif_bind()
382 ep->port.port = pn; in scif_bind()
383 ep->conn_async_state = ASYNC_CONN_IDLE; in scif_bind()
388 spin_unlock(&ep->lock); in scif_bind()
396 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_listen() local
399 "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]); in scif_listen()
400 spin_lock(&ep->lock); in scif_listen()
401 switch (ep->state) { in scif_listen()
407 spin_unlock(&ep->lock); in scif_listen()
413 spin_unlock(&ep->lock); in scif_listen()
419 ep->state = SCIFEP_LISTENING; in scif_listen()
420 ep->backlog = backlog; in scif_listen()
422 ep->conreqcnt = 0; in scif_listen()
423 ep->acceptcnt = 0; in scif_listen()
424 INIT_LIST_HEAD(&ep->conlist); in scif_listen()
425 init_waitqueue_head(&ep->conwq); in scif_listen()
426 INIT_LIST_HEAD(&ep->li_accept); in scif_listen()
427 spin_unlock(&ep->lock); in scif_listen()
433 scif_teardown_ep(ep); in scif_listen()
434 ep->qp_info.qp = NULL; in scif_listen()
437 list_add_tail(&ep->list, &scif_info.listen); in scif_listen()
471 static int scif_conn_func(struct scif_endpt *ep) in scif_conn_func() argument
477 err = scif_reserve_dma_chan(ep); in scif_conn_func()
479 dev_err(&ep->remote_dev->sdev->dev, in scif_conn_func()
481 ep->state = SCIFEP_BOUND; in scif_conn_func()
485 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset, in scif_conn_func()
486 SCIF_ENDPT_QP_SIZE, ep->remote_dev); in scif_conn_func()
488 dev_err(&ep->remote_dev->sdev->dev, in scif_conn_func()
490 __func__, err, ep->qp_info.qp_offset); in scif_conn_func()
491 ep->state = SCIFEP_BOUND; in scif_conn_func()
495 spdev = scif_get_peer_dev(ep->remote_dev); in scif_conn_func()
501 msg.src = ep->port; in scif_conn_func()
502 msg.dst = ep->conn_port; in scif_conn_func()
504 msg.payload[0] = (u64)ep; in scif_conn_func()
505 msg.payload[1] = ep->qp_info.qp_offset; in scif_conn_func()
506 err = _scif_nodeqp_send(ep->remote_dev, &msg); in scif_conn_func()
514 err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING, in scif_conn_func()
517 dev_err(&ep->remote_dev->sdev->dev, in scif_conn_func()
519 ep->state = SCIFEP_BOUND; in scif_conn_func()
521 spdev = scif_get_peer_dev(ep->remote_dev); in scif_conn_func()
526 if (ep->state == SCIFEP_MAPPING) { in scif_conn_func()
527 err = scif_setup_qp_connect_response(ep->remote_dev, in scif_conn_func()
528 ep->qp_info.qp, in scif_conn_func()
529 ep->qp_info.gnt_pld); in scif_conn_func()
535 dev_err(&ep->remote_dev->sdev->dev, in scif_conn_func()
538 msg.payload[0] = ep->remote_ep; in scif_conn_func()
539 _scif_nodeqp_send(ep->remote_dev, &msg); in scif_conn_func()
540 ep->state = SCIFEP_BOUND; in scif_conn_func()
545 msg.payload[0] = ep->remote_ep; in scif_conn_func()
546 err = _scif_nodeqp_send(ep->remote_dev, &msg); in scif_conn_func()
548 ep->state = SCIFEP_BOUND; in scif_conn_func()
551 ep->state = SCIFEP_CONNECTED; in scif_conn_func()
553 list_add_tail(&ep->list, &scif_info.connected); in scif_conn_func()
555 dev_dbg(&ep->remote_dev->sdev->dev, in scif_conn_func()
556 "SCIFAPI connect: ep %p connected\n", ep); in scif_conn_func()
557 } else if (ep->state == SCIFEP_BOUND) { in scif_conn_func()
558 dev_dbg(&ep->remote_dev->sdev->dev, in scif_conn_func()
559 "SCIFAPI connect: ep %p connection refused\n", ep); in scif_conn_func()
568 scif_cleanup_ep_qp(ep); in scif_conn_func()
581 struct scif_endpt *ep; in scif_conn_handler() local
584 ep = NULL; in scif_conn_handler()
587 ep = list_first_entry(&scif_info.nb_connect_list, in scif_conn_handler()
589 list_del(&ep->conn_list); in scif_conn_handler()
592 if (ep) { in scif_conn_handler()
593 ep->conn_err = scif_conn_func(ep); in scif_conn_handler()
594 wake_up_interruptible(&ep->conn_pend_wq); in scif_conn_handler()
596 } while (ep); in scif_conn_handler()
601 struct scif_endpt *ep = (struct scif_endpt *)epd; in __scif_connect() local
606 dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep, in __scif_connect()
607 scif_ep_states[ep->state]); in __scif_connect()
621 spin_lock(&ep->lock); in __scif_connect()
622 switch (ep->state) { in __scif_connect()
628 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) in __scif_connect()
629 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; in __scif_connect()
639 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) in __scif_connect()
645 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) in __scif_connect()
646 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; in __scif_connect()
654 ep->port.port = err; in __scif_connect()
655 ep->port.node = scif_info.nodeid; in __scif_connect()
656 ep->conn_async_state = ASYNC_CONN_IDLE; in __scif_connect()
671 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { in __scif_connect()
672 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; in __scif_connect()
673 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) { in __scif_connect()
676 ep->conn_port = *dst; in __scif_connect()
677 init_waitqueue_head(&ep->sendwq); in __scif_connect()
678 init_waitqueue_head(&ep->recvwq); in __scif_connect()
679 init_waitqueue_head(&ep->conwq); in __scif_connect()
680 ep->conn_async_state = 0; in __scif_connect()
683 ep->conn_async_state = ASYNC_CONN_INPROGRESS; in __scif_connect()
688 if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) in __scif_connect()
691 ep->state = SCIFEP_CONNECTING; in __scif_connect()
692 ep->remote_dev = &scif_dev[dst->node]; in __scif_connect()
693 ep->qp_info.qp->magic = SCIFEP_MAGIC; in __scif_connect()
694 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { in __scif_connect()
695 init_waitqueue_head(&ep->conn_pend_wq); in __scif_connect()
697 list_add_tail(&ep->conn_list, &scif_info.nb_connect_list); in __scif_connect()
703 spin_unlock(&ep->lock); in __scif_connect()
707 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) { in __scif_connect()
709 err = ep->conn_err; in __scif_connect()
710 spin_lock(&ep->lock); in __scif_connect()
711 ep->conn_async_state = ASYNC_CONN_IDLE; in __scif_connect()
712 spin_unlock(&ep->lock); in __scif_connect()
714 err = scif_conn_func(ep); in __scif_connect()
947 struct scif_endpt *ep = (struct scif_endpt *)epd; in _scif_send() local
951 struct scif_qp *qp = ep->qp_info.qp; in _scif_send()
956 spin_lock(&ep->lock); in _scif_send()
957 while (sent_len != len && SCIFEP_CONNECTED == ep->state) { in _scif_send()
972 notif_msg.src = ep->port; in _scif_send()
974 notif_msg.payload[0] = ep->remote_ep; in _scif_send()
975 ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg); in _scif_send()
987 spin_unlock(&ep->lock); in _scif_send()
990 wait_event_interruptible(ep->sendwq, in _scif_send()
991 (SCIFEP_CONNECTED != ep->state) || in _scif_send()
994 spin_lock(&ep->lock); in _scif_send()
1000 else if (!ret && SCIFEP_CONNECTED != ep->state) in _scif_send()
1001 ret = SCIFEP_DISCONNECTED == ep->state ? in _scif_send()
1003 spin_unlock(&ep->lock); in _scif_send()
1010 struct scif_endpt *ep = (struct scif_endpt *)epd; in _scif_recv() local
1014 struct scif_qp *qp = ep->qp_info.qp; in _scif_recv()
1018 spin_lock(&ep->lock); in _scif_recv()
1019 while (remaining_len && (SCIFEP_CONNECTED == ep->state || in _scif_recv()
1020 SCIFEP_DISCONNECTED == ep->state)) { in _scif_recv()
1031 if (ep->state == SCIFEP_CONNECTED) { in _scif_recv()
1044 notif_msg.src = ep->port; in _scif_recv()
1046 notif_msg.payload[0] = ep->remote_ep; in _scif_recv()
1047 ret = _scif_nodeqp_send(ep->remote_dev, in _scif_recv()
1060 if (ep->state == SCIFEP_DISCONNECTED) in _scif_recv()
1069 spin_unlock(&ep->lock); in _scif_recv()
1075 wait_event_interruptible(ep->recvwq, in _scif_recv()
1076 SCIFEP_CONNECTED != ep->state || in _scif_recv()
1080 spin_lock(&ep->lock); in _scif_recv()
1086 else if (!ret && ep->state != SCIFEP_CONNECTED) in _scif_recv()
1087 ret = ep->state == SCIFEP_DISCONNECTED ? in _scif_recv()
1089 spin_unlock(&ep->lock); in _scif_recv()
1105 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_user_send() local
1113 "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]); in scif_user_send()
1131 mutex_lock(&ep->sendlock); in scif_user_send()
1148 mutex_unlock(&ep->sendlock); in scif_user_send()
1166 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_user_recv() local
1174 "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]); in scif_user_recv()
1192 mutex_lock(&ep->recvlock); in scif_user_recv()
1209 mutex_unlock(&ep->recvlock); in scif_user_recv()
1227 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_send() local
1231 "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]); in scif_send()
1238 if (!ep->remote_dev) in scif_send()
1247 mutex_lock(&ep->sendlock); in scif_send()
1252 mutex_unlock(&ep->sendlock); in scif_send()
1269 struct scif_endpt *ep = (struct scif_endpt *)epd; in scif_recv() local
1273 "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]); in scif_recv()
1287 mutex_lock(&ep->recvlock); in scif_recv()
1292 mutex_unlock(&ep->recvlock); in scif_recv()
1299 poll_table *p, struct scif_endpt *ep) in _scif_poll_wait() argument
1308 spin_unlock(&ep->lock); in _scif_poll_wait()
1310 spin_lock(&ep->lock); in _scif_poll_wait()
1314 __scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep) in __scif_pollfd() argument
1319 "SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]); in __scif_pollfd()
1321 spin_lock(&ep->lock); in __scif_pollfd()
1324 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { in __scif_pollfd()
1325 _scif_poll_wait(f, &ep->conn_pend_wq, wait, ep); in __scif_pollfd()
1326 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { in __scif_pollfd()
1327 if (ep->state == SCIFEP_CONNECTED || in __scif_pollfd()
1328 ep->state == SCIFEP_DISCONNECTED || in __scif_pollfd()
1329 ep->conn_err) in __scif_pollfd()
1336 if (ep->state == SCIFEP_LISTENING) { in __scif_pollfd()
1337 _scif_poll_wait(f, &ep->conwq, wait, ep); in __scif_pollfd()
1338 if (ep->state == SCIFEP_LISTENING) { in __scif_pollfd()
1339 if (ep->conreqcnt) in __scif_pollfd()
1346 if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) { in __scif_pollfd()
1348 _scif_poll_wait(f, &ep->recvwq, wait, ep); in __scif_pollfd()
1350 _scif_poll_wait(f, &ep->sendwq, wait, ep); in __scif_pollfd()
1351 if (ep->state == SCIFEP_CONNECTED || in __scif_pollfd()
1352 ep->state == SCIFEP_DISCONNECTED) { in __scif_pollfd()
1354 if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1)) in __scif_pollfd()
1357 if (scif_rb_space(&ep->qp_info.qp->outbound_q)) in __scif_pollfd()
1360 if (ep->state == SCIFEP_DISCONNECTED) in __scif_pollfd()
1369 spin_unlock(&ep->lock); in __scif_pollfd()
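
For context, the matches above trace the full endpoint lifecycle (scif_open -> scif_bind -> scif_listen/scif_connect -> scif_send/scif_recv -> scif_close and poll). Below is a minimal, hedged sketch of how a kernel-mode SCIF client might drive that lifecycle through the exported API in <linux/scif.h>; the peer node/port values, buffer contents, and module boilerplate are illustrative assumptions, not taken from scif_api.c.

/*
 * Hedged sketch only: walks an endpoint through the states referenced
 * above (SCIFEP_UNBOUND -> SCIFEP_BOUND -> SCIFEP_CONNECTING ->
 * SCIFEP_CONNECTED). The destination node/port are assumed values.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/scif.h>

static int __init scif_client_demo_init(void)
{
	struct scif_port_id dst = { .node = 1, .port = 2000 }; /* assumed peer */
	char buf[64] = "hello over scif";
	scif_epd_t epd;
	int ret;

	epd = scif_open();		/* allocates a struct scif_endpt */
	if (!epd)
		return -ENOMEM;

	ret = scif_bind(epd, 0);	/* 0: let SCIF assign a local port */
	if (ret < 0)
		goto out_close;

	ret = scif_connect(epd, &dst);	/* endpoint enters SCIFEP_CONNECTING */
	if (ret < 0)
		goto out_close;

	ret = scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
	if (ret < 0)
		goto out_close;

	ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);

out_close:
	scif_close(epd);		/* tears down the qp, frees the endpoint */
	return ret < 0 ? ret : 0;
}

static void __exit scif_client_demo_exit(void)
{
}

module_init(scif_client_demo_init);
module_exit(scif_client_demo_exit);
MODULE_LICENSE("GPL");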