
Searched refs:sk (Results 1 – 25 of 140) sorted by relevance


/linux-4.19.296/include/net/
sock.h
86 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ argument
91 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
502 void (*sk_state_change)(struct sock *sk);
503 void (*sk_data_ready)(struct sock *sk);
504 void (*sk_write_space)(struct sock *sk);
505 void (*sk_error_report)(struct sock *sk);
506 int (*sk_backlog_rcv)(struct sock *sk,
509 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
513 void (*sk_destruct)(struct sock *sk);
524 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
[all …]
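
(Note on the sock.h hits above: sk_state_change, sk_data_ready and friends are per-socket callback pointers. A common pattern in 4.19-era kernel code is to save and override sk_data_ready under lock_sock(); the fragment below is a minimal sketch of that pattern only — my_conn, my_data_ready and my_conn_attach are made-up names, not taken from the files in these results.)

/* Sketch: saving and overriding the sk_data_ready callback listed above. */
#include <net/sock.h>

struct my_conn {
	struct sock *sk;
	void (*saved_data_ready)(struct sock *sk);
};

static void my_data_ready(struct sock *sk)
{
	struct my_conn *conn = sk->sk_user_data;

	/* protocol-specific wakeup would go here, then chain to the original */
	if (conn && conn->saved_data_ready)
		conn->saved_data_ready(sk);
}

static void my_conn_attach(struct my_conn *conn, struct sock *sk)
{
	lock_sock(sk);		/* callback pointers change under the socket lock */
	conn->sk = sk;
	conn->saved_data_ready = sk->sk_data_ready;
	sk->sk_user_data = conn;
	sk->sk_data_ready = my_data_ready;
	release_sock(sk);
}
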
llc_c_ev.h
128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
137 int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
138 int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
[all …]
llc_c_ac.h
90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
99 int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
101 int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
[all …]
inet_connection_sock.h
38 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
39 void (*send_check)(struct sock *sk, struct sk_buff *skb);
40 int (*rebuild_header)(struct sock *sk);
41 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
42 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
43 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
51 int (*setsockopt)(struct sock *sk, int level, int optname,
53 int (*getsockopt)(struct sock *sk, int level, int optname,
56 int (*compat_setsockopt)(struct sock *sk,
59 int (*compat_getsockopt)(struct sock *sk,
[all …]
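
(The inet_connection_sock.h hits above are the per-address-family ops table that connection-oriented protocols hand to the ICSK layer. Below is an abbreviated sketch of how the TCP/IPv4 side fills such a table; the field names come from the snippet, the right-hand sides are reproduced from memory — see ipv4_specific in net/ipv4/tcp_ipv4.c for the authoritative initializer.)

/* Abbreviated sketch of an inet_connection_sock_af_ops table, modelled on
 * how TCP/IPv4 wires it up; trimmed and from memory, so treat
 * ipv4_specific in net/ipv4/tcp_ipv4.c as the reference. */
#include <net/tcp.h>

static const struct inet_connection_sock_af_ops my_ipv4_af_ops = {
	.queue_xmit     = ip_queue_xmit,	/* matches line 38 above */
	.send_check     = tcp_v4_send_check,
	.rebuild_header = inet_sk_rebuild_header,
	.sk_rx_dst_set  = inet_sk_rx_dst_set,
	.conn_request   = tcp_v4_conn_request,
	.syn_recv_sock  = tcp_v4_syn_recv_sock,
	.setsockopt     = ip_setsockopt,
	.getsockopt     = ip_getsockopt,
	/* net_header_len, sockaddr_len, addr2sockaddr, ... omitted */
};
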
tcp.h
54 void tcp_time_wait(struct sock *sk, int state, int timeo);
259 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
261 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in tcp_under_memory_pressure()
262 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in tcp_under_memory_pressure()
284 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory() argument
286 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
287 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) in tcp_out_of_memory()
292 void sk_forced_mem_schedule(struct sock *sk, int size);
294 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) in tcp_too_many_orphans() argument
296 struct percpu_counter *ocp = sk->sk_prot->orphan_count; in tcp_too_many_orphans()
[all …]
inet_sock.h
107 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) in inet_rsk() argument
109 return (struct inet_request_sock *)sk; in inet_rsk()
112 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) in inet_request_mark() argument
114 if (!sk->sk_mark && in inet_request_mark()
115 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) in inet_request_mark()
118 return sk->sk_mark; in inet_request_mark()
121 static inline int inet_request_bound_dev_if(const struct sock *sk, in inet_request_bound_dev_if() argument
125 struct net *net = sock_net(sk); in inet_request_bound_dev_if()
127 if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) in inet_request_bound_dev_if()
131 return sk->sk_bound_dev_if; in inet_request_bound_dev_if()
[all …]
tls.h
82 int (*hash)(struct tls_device *device, struct sock *sk);
83 void (*unhash)(struct tls_device *device, struct sock *sk);
121 void (*saved_data_ready)(struct sock *sk);
147 void (*sk_destruct)(struct sock *sk);
212 int (*push_pending_record)(struct sock *sk, int flags);
214 void (*sk_write_space)(struct sock *sk);
215 void (*sk_destruct)(struct sock *sk);
216 void (*sk_proto_close)(struct sock *sk, long timeout);
218 int (*setsockopt)(struct sock *sk, int level,
221 int (*getsockopt)(struct sock *sk, int level,
[all …]
ip.h
93 ipcm->sockc.tsflags = inet->sk.sk_tsflags; in ipcm_init_sk()
94 ipcm->oif = inet->sk.sk_bound_dev_if; in ipcm_init_sk()
125 struct sock *sk; member
153 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
162 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
163 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
164 int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
167 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
168 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
170 int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
[all …]
busy_poll.h
49 static inline bool sk_can_busy_loop(const struct sock *sk) in sk_can_busy_loop() argument
51 return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); in sk_can_busy_loop()
66 static inline bool sk_can_busy_loop(struct sock *sk) in sk_can_busy_loop() argument
98 static inline bool sk_busy_loop_timeout(struct sock *sk, in sk_busy_loop_timeout() argument
102 unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec); in sk_busy_loop_timeout()
114 static inline void sk_busy_loop(struct sock *sk, int nonblock) in sk_busy_loop() argument
117 unsigned int napi_id = READ_ONCE(sk->sk_napi_id); in sk_busy_loop()
120 napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk); in sk_busy_loop()
134 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) in sk_mark_napi_id() argument
137 WRITE_ONCE(sk->sk_napi_id, skb->napi_id); in sk_mark_napi_id()
[all …]
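
(sk_ll_usec, read by sk_can_busy_loop() and sk_busy_loop_timeout() above, is the per-socket busy-poll budget in microseconds. It is normally set from user space via SO_BUSY_POLL; the fragment below is a minimal illustration of that, not code from this tree — enable_busy_poll is a made-up helper.)

/* Minimal user-space sketch: SO_BUSY_POLL sets the sk_ll_usec budget that
 * sk_can_busy_loop()/sk_busy_loop_timeout() consult. Raising it above the
 * net.core.busy_read default typically requires CAP_NET_ADMIN. */
#include <stdio.h>
#include <sys/socket.h>

static int enable_busy_poll(int fd, unsigned int usec)
{
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}
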
timewait_sock.h
22 int (*twsk_unique)(struct sock *sk,
24 void (*twsk_destructor)(struct sock *sk);
27 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) in twsk_unique() argument
29 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) in twsk_unique()
30 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); in twsk_unique()
34 static inline void twsk_destructor(struct sock *sk) in twsk_destructor() argument
36 if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) in twsk_destructor()
37 sk->sk_prot->twsk_prot->twsk_destructor(sk); in twsk_destructor()
udp.h
131 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) in udp_csum_outgoing() argument
135 skb_queue_walk(&sk->sk_write_queue, skb) { in udp_csum_outgoing()
196 static inline int udp_lib_hash(struct sock *sk) in udp_lib_hash() argument
202 void udp_lib_unhash(struct sock *sk);
203 void udp_lib_rehash(struct sock *sk, u16 new_hash);
205 static inline void udp_lib_close(struct sock *sk, long timeout) in udp_lib_close() argument
207 sk_common_release(sk); in udp_lib_close()
210 int udp_lib_get_port(struct sock *sk, unsigned short snum,
251 static inline int udp_rqueue_get(struct sock *sk) in udp_rqueue_get() argument
253 return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit); in udp_rqueue_get()
[all …]
inet_hashtables.h
210 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
219 static inline int inet_sk_listen_hashfn(const struct sock *sk) in inet_sk_listen_hashfn() argument
221 return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num); in inet_sk_listen_hashfn()
225 int __inet_inherit_port(const struct sock *sk, struct sock *child);
227 void inet_put_port(struct sock *sk);
235 bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
236 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
238 int __inet_hash(struct sock *sk, struct sock *osk);
239 int inet_hash(struct sock *sk);
240 void inet_unhash(struct sock *sk);
[all …]
route.h
45 #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) argument
46 #define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) argument
128 const struct sock *sk);
150 struct sock *sk, in ip_route_output_ports() argument
155 flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, in ip_route_output_ports()
157 sk ? inet_sk_flowi_flags(sk) : 0, in ip_route_output_ports()
158 daddr, saddr, dport, sport, sock_net_uid(net, sk)); in ip_route_output_ports()
159 if (sk) in ip_route_output_ports()
160 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); in ip_route_output_ports()
161 return ip_route_output_flow(net, fl4, sk); in ip_route_output_ports()
[all …]
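
(ip_route_output_ports(), whose body appears above, fills a struct flowi4 from the socket and resolves a route. The sketch below shows the typical call shape, loosely modelled on in-tree callers — my_find_route is a made-up wrapper and error handling is reduced to the ERR_PTR check.)

/* Sketch of a typical ip_route_output_ports() caller; my_find_route is a
 * hypothetical wrapper, not a function from this tree. */
#include <linux/err.h>
#include <net/route.h>

static struct rtable *my_find_route(struct net *net, struct sock *sk,
				    struct flowi4 *fl4,
				    __be32 daddr, __be32 saddr,
				    __be16 dport, __be16 sport)
{
	struct rtable *rt;

	rt = ip_route_output_ports(net, fl4, sk, daddr, saddr,
				   dport, sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		return NULL;	/* no route */
	return rt;		/* caller releases it with ip_rt_put(rt) */
}
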
llc_conn.h
34 struct sock sk; member
83 static inline struct llc_sock *llc_sk(const struct sock *sk) in llc_sk() argument
85 return (struct llc_sock *)sk; in llc_sk()
100 void llc_sk_stop_all_timers(struct sock *sk, bool sync);
101 void llc_sk_free(struct sock *sk);
103 void llc_sk_reset(struct sock *sk);
106 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
107 void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
108 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
109 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
[all …]
/linux-4.19.296/include/linux/
bpf-cgroup.h
85 int __cgroup_bpf_run_filter_skb(struct sock *sk,
89 int __cgroup_bpf_run_filter_sk(struct sock *sk,
92 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
97 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
125 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ argument
129 __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
135 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ argument
138 if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
139 typeof(sk) __sk = sk_to_full_sk(sk); \
147 #define BPF_CGROUP_RUN_SK_PROG(sk, type) \ argument
[all …]
sock_diag.h
18 int (*get_info)(struct sk_buff *skb, struct sock *sk);
28 u64 sock_gen_cookie(struct sock *sk);
29 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
30 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
32 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
33 int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
37 enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) in sock_diag_destroy_group() argument
39 switch (sk->sk_family) { in sock_diag_destroy_group()
41 if (sk->sk_type == SOCK_RAW) in sock_diag_destroy_group()
44 switch (sk->sk_protocol) { in sock_diag_destroy_group()
[all …]
udp.h
45 #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
46 #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
47 #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
73 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
74 void (*encap_destroy)(struct sock *sk);
77 struct sk_buff * (*gro_receive)(struct sock *sk,
80 int (*gro_complete)(struct sock *sk,
93 static inline struct udp_sock *udp_sk(const struct sock *sk) in udp_sk() argument
95 return (struct udp_sock *)sk; in udp_sk()
98 static inline void udp_set_no_check6_tx(struct sock *sk, bool val) in udp_set_no_check6_tx() argument
[all …]
/linux-4.19.296/drivers/isdn/mISDN/
socket.c
31 #define _pms(sk) ((struct mISDN_sock *)sk) argument
55 mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_link() argument
58 sk_add_node(sk, &l->head); in mISDN_sock_link()
62 static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) in mISDN_sock_unlink() argument
65 sk_del_node_init(sk); in mISDN_sock_unlink()
78 if (msk->sk.sk_state == MISDN_CLOSED) in mISDN_send()
81 err = sock_queue_rcv_skb(&msk->sk, skb); in mISDN_send()
97 msk->sk.sk_state = MISDN_CLOSED; in mISDN_ctrl()
104 mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) in mISDN_sock_cmsg() argument
108 if (_pms(sk)->cmask & MISDN_TIME_STAMP) { in mISDN_sock_cmsg()
[all …]
/linux-4.19.296/include/crypto/
if_alg.h
33 struct sock sk; member
54 int (*accept)(void *private, struct sock *sk);
55 int (*accept_nokey)(void *private, struct sock *sk);
102 struct sock *sk; member
167 void af_alg_release_parent(struct sock *sk);
168 int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
176 static inline struct alg_sock *alg_sk(struct sock *sk) in alg_sk() argument
178 return (struct alg_sock *)sk; in alg_sk()
187 static inline int af_alg_sndbuf(struct sock *sk) in af_alg_sndbuf() argument
189 struct alg_sock *ask = alg_sk(sk); in af_alg_sndbuf()
[all …]
/linux-4.19.296/crypto/
af_alg.c
125 if (sock->sk) { in af_alg_release()
126 sock_put(sock->sk); in af_alg_release()
127 sock->sk = NULL; in af_alg_release()
133 void af_alg_release_parent(struct sock *sk) in af_alg_release_parent() argument
135 struct alg_sock *ask = alg_sk(sk); in af_alg_release_parent()
138 sk = ask->parent; in af_alg_release_parent()
139 ask = alg_sk(sk); in af_alg_release_parent()
145 sock_put(sk); in af_alg_release_parent()
152 struct sock *sk = sock->sk; in alg_bind() local
153 struct alg_sock *ask = alg_sk(sk); in alg_bind()
[all …]
algif_hash.c
37 static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) in hash_alloc_result() argument
46 ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); in hash_alloc_result()
55 static void hash_free_result(struct sock *sk, struct hash_ctx *ctx) in hash_free_result() argument
64 sock_kzfree_s(sk, ctx->result, ds); in hash_free_result()
72 struct sock *sk = sock->sk; in hash_sendmsg() local
73 struct alg_sock *ask = alg_sk(sk); in hash_sendmsg()
78 if (limit > sk->sk_sndbuf) in hash_sendmsg()
79 limit = sk->sk_sndbuf; in hash_sendmsg()
81 lock_sock(sk); in hash_sendmsg()
84 hash_free_result(sk, ctx); in hash_sendmsg()
[all …]
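
(algif_hash.c implements the "hash" type of the AF_ALG user-space interface whose socket plumbing shows up in if_alg.h and af_alg.c above. The user-space sketch below pushes a short buffer through it — error checking is omitted, "sha256" is assumed to be available in the kernel crypto API, and this is an illustration of the interface rather than code from this tree.)

/* User-space sketch of the AF_ALG "hash" interface served by algif_hash.c;
 * error handling omitted for brevity. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",	/* assumed to be available */
	};
	unsigned char digest[32];
	int tfm, op;
	unsigned int i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, NULL);		/* handled by af_alg_accept() */

	write(op, "hello", 5);			/* arrives via hash_sendmsg() above */
	read(op, digest, sizeof(digest));	/* digest comes back on the recvmsg path */

	for (i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(op);
	close(tfm);
	return 0;
}
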
algif_skcipher.c
44 struct sock *sk = sock->sk; in skcipher_sendmsg() local
45 struct alg_sock *ask = alg_sk(sk); in skcipher_sendmsg()
57 struct sock *sk = sock->sk; in _skcipher_recvmsg() local
58 struct alg_sock *ask = alg_sk(sk); in _skcipher_recvmsg()
69 err = af_alg_wait_for_data(sk, flags); in _skcipher_recvmsg()
75 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + in _skcipher_recvmsg()
81 err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); in _skcipher_recvmsg()
96 areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); in _skcipher_recvmsg()
99 areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), in _skcipher_recvmsg()
107 af_alg_pull_tsgl(sk, len, areq->tsgl, 0); in _skcipher_recvmsg()
[all …]
algif_aead.c
48 static inline bool aead_sufficient_data(struct sock *sk) in aead_sufficient_data() argument
50 struct alg_sock *ask = alg_sk(sk); in aead_sufficient_data()
67 struct sock *sk = sock->sk; in aead_sendmsg() local
68 struct alg_sock *ask = alg_sk(sk); in aead_sendmsg()
95 struct sock *sk = sock->sk; in _aead_recvmsg() local
96 struct alg_sock *ask = alg_sk(sk); in _aead_recvmsg()
114 err = af_alg_wait_for_data(sk, flags); in _aead_recvmsg()
134 if (!aead_sufficient_data(sk)) in _aead_recvmsg()
157 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + in _aead_recvmsg()
163 err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages); in _aead_recvmsg()
[all …]
/linux-4.19.296/include/trace/events/
tcp.h
28 if (sk->sk_family == AF_INET6) { \
52 TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
54 TP_ARGS(sk, skb),
68 struct inet_sock *inet = inet_sk(sk);
72 __entry->skaddr = sk;
84 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
94 TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
96 TP_ARGS(sk, skb)
105 TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
107 TP_ARGS(sk, skb)
[all …]
sock.h
72 TP_PROTO(struct sock *sk, struct sk_buff *skb),
74 TP_ARGS(sk, skb),
83 __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
85 __entry->sk_rcvbuf = sk->sk_rcvbuf;
94 TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
96 TP_ARGS(sk, prot, allocated, kind),
116 __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
117 __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
118 __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
119 __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
[all …]
