Lines Matching refs:sk

54 void tcp_time_wait(struct sock *sk, int state, int timeo);
259 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
261 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in tcp_under_memory_pressure()
262 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in tcp_under_memory_pressure()
284 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory() argument
286 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
287 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) in tcp_out_of_memory()
292 void sk_forced_mem_schedule(struct sock *sk, int size);
294 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) in tcp_too_many_orphans() argument
296 struct percpu_counter *ocp = sk->sk_prot->orphan_count; in tcp_too_many_orphans()
307 bool tcp_check_oom(struct sock *sk, int shift);
321 void tcp_shutdown(struct sock *sk, int how);
327 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
328 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
329 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
331 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
333 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
335 void tcp_release_cb(struct sock *sk);
337 void tcp_write_timer_handler(struct sock *sk);
338 void tcp_delack_timer_handler(struct sock *sk);
339 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
340 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
341 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
342 void tcp_rcv_space_adjust(struct sock *sk);
343 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
344 void tcp_twsk_destructor(struct sock *sk);
345 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
349 static inline void tcp_dec_quickack_mode(struct sock *sk) in tcp_dec_quickack_mode() argument
351 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
355 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0; in tcp_dec_quickack_mode()
382 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
387 void tcp_enter_loss(struct sock *sk);
388 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
390 void tcp_update_metrics(struct sock *sk);
391 void tcp_init_metrics(struct sock *sk);
394 void __tcp_close(struct sock *sk, long timeout);
395 void tcp_close(struct sock *sk, long timeout);
396 void tcp_init_sock(struct sock *sk);
397 void tcp_init_transfer(struct sock *sk, int bpf_op);
400 int tcp_getsockopt(struct sock *sk, int level, int optname,
402 int tcp_setsockopt(struct sock *sk, int level, int optname,
404 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
406 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
408 void tcp_set_keepalive(struct sock *sk, int val);
410 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
412 int tcp_set_rcvlowat(struct sock *sk, int val);
413 void tcp_data_ready(struct sock *sk);
425 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
426 void tcp_v4_mtu_reduced(struct sock *sk);
427 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
428 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
429 struct sock *tcp_create_openreq_child(const struct sock *sk,
432 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
433 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
438 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
439 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
440 int tcp_connect(struct sock *sk);
446 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
450 int tcp_disconnect(struct sock *sk, int flags);
452 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
453 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
454 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
457 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
462 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
480 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow() argument
485 if (sk->sk_reuseport) { in tcp_synq_overflow()
488 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_overflow()
498 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_overflow()
500 WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now); in tcp_synq_overflow()
504 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow() argument
509 if (sk->sk_reuseport) { in tcp_synq_no_recent_overflow()
512 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_no_recent_overflow()
521 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_no_recent_overflow()
554 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
562 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
564 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
565 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
566 void tcp_retransmit_timer(struct sock *sk);
569 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
575 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
582 void tcp_send_fin(struct sock *sk);
583 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
586 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
587 void tcp_send_ack(struct sock *sk);
588 void tcp_send_delayed_ack(struct sock *sk);
589 void tcp_send_loss_probe(struct sock *sk);
590 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
595 void tcp_rearm_rto(struct sock *sk);
596 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
597 void tcp_reset(struct sock *sk);
599 void tcp_fin(struct sock *sk);
600 void tcp_check_space(struct sock *sk);
604 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers() argument
606 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1) in tcp_clear_xmit_timers()
607 __sock_put(sk); in tcp_clear_xmit_timers()
609 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1) in tcp_clear_xmit_timers()
610 __sock_put(sk); in tcp_clear_xmit_timers()
612 inet_csk_clear_xmit_timers(sk); in tcp_clear_xmit_timers()
615 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
616 unsigned int tcp_current_mss(struct sock *sk);
645 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
648 void tcp_initialize_rcv_mss(struct sock *sk);
650 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
651 int tcp_mss_to_mtu(struct sock *sk, int mss);
652 void tcp_mtup_init(struct sock *sk);
653 void tcp_init_buffer_space(struct sock *sk);
655 static inline void tcp_bound_rto(const struct sock *sk) in tcp_bound_rto() argument
657 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
658 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
678 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check() argument
680 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
684 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && in tcp_fast_path_check()
690 static inline u32 tcp_rto_min(struct sock *sk) in tcp_rto_min() argument
692 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_rto_min()
700 static inline u32 tcp_rto_min_us(struct sock *sk) in tcp_rto_min_us() argument
702 return jiffies_to_usecs(tcp_rto_min(sk)); in tcp_rto_min_us()
733 u32 __tcp_select_window(struct sock *sk);
735 void tcp_send_window_probe(struct sock *sk);
1029 void (*init)(struct sock *sk);
1031 void (*release)(struct sock *sk);
1034 u32 (*ssthresh)(struct sock *sk);
1036 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1038 void (*set_state)(struct sock *sk, u8 new_state);
1040 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1042 void (*in_ack_event)(struct sock *sk, u32 flags);
1044 u32 (*undo_cwnd)(struct sock *sk);
1046 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1048 u32 (*min_tso_segs)(struct sock *sk);
1050 u32 (*sndbuf_expand)(struct sock *sk);
1054 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1056 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1066 void tcp_assign_congestion_control(struct sock *sk);
1067 void tcp_init_congestion_control(struct sock *sk);
1068 void tcp_cleanup_congestion_control(struct sock *sk);
1074 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1079 u32 tcp_reno_ssthresh(struct sock *sk);
1080 u32 tcp_reno_undo_cwnd(struct sock *sk);
1081 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
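
The tcp_congestion_ops callbacks above, together with the exported Reno helpers, are what a pluggable congestion control registers. A minimal sketch, assuming a hypothetical "demo" module that simply reuses the Reno callbacks and registers through tcp_register_congestion_control()/tcp_unregister_congestion_control() from this header:

/* Hypothetical "demo" congestion control module; a sketch only, reusing the
 * Reno helpers declared above rather than implementing new behaviour.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_demo __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + AIMD growth */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* roll back a spurious reduction */
	.name		= "demo",
	.owner		= THIS_MODULE,
};

static int __init tcp_demo_init(void)
{
	return tcp_register_congestion_control(&tcp_demo);
}

static void __exit tcp_demo_exit(void)
{
	tcp_unregister_congestion_control(&tcp_demo);
}

module_init(tcp_demo_init);
module_exit(tcp_demo_exit);
MODULE_LICENSE("GPL");

Registration is rejected unless ssthresh, undo_cwnd, and one of cong_avoid/cong_control are provided.
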
1095 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn() argument
1097 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1102 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) in tcp_set_ca_state() argument
1104 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ca_state()
1107 icsk->icsk_ca_ops->set_state(sk, ca_state); in tcp_set_ca_state()
1111 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event() argument
1113 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1116 icsk->icsk_ca_ops->cwnd_event(sk, event); in tcp_ca_event()
1120 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1121 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1123 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1125 void tcp_rate_check_app_limited(struct sock *sk);
1180 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction() argument
1183 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1190 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh() argument
1192 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1194 if (tcp_in_cwnd_reduction(sk)) in tcp_current_ssthresh()
1205 void tcp_enter_cwr(struct sock *sk);
1235 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited() argument
1237 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1255 static inline bool tcp_needs_internal_pacing(const struct sock *sk) in tcp_needs_internal_pacing() argument
1257 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; in tcp_needs_internal_pacing()
1266 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base() argument
1268 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1272 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when() argument
1275 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; in tcp_probe0_when()
1280 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer() argument
1282 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
1283 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_check_probe_timer()
1284 tcp_probe0_base(sk), TCP_RTO_MAX); in tcp_check_probe_timer()
1317 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1318 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1329 void tcp_set_state(struct sock *sk, int state);
1331 void tcp_done(struct sock *sk);
1333 int tcp_abort(struct sock *sk, int err);
1342 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1344 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check() argument
1346 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_slow_start_after_idle_check()
1347 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1350 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || in tcp_slow_start_after_idle_check()
1354 if (delta > inet_csk(sk)->icsk_rto) in tcp_slow_start_after_idle_check()
1355 tcp_cwnd_restart(sk, delta); in tcp_slow_start_after_idle_check()
1359 void tcp_select_initial_window(const struct sock *sk, int __space,
1364 static inline int tcp_win_from_space(const struct sock *sk, int space) in tcp_win_from_space() argument
1366 int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); in tcp_win_from_space()
1374 static inline int tcp_space(const struct sock *sk) in tcp_space() argument
1376 return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len - in tcp_space()
1377 atomic_read(&sk->sk_rmem_alloc)); in tcp_space()
1380 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space() argument
1382 return tcp_win_from_space(sk, sk->sk_rcvbuf); in tcp_full_space()
1390 static inline bool tcp_rmem_pressure(const struct sock *sk) in tcp_rmem_pressure() argument
1394 if (tcp_under_memory_pressure(sk)) in tcp_rmem_pressure()
1397 rcvbuf = READ_ONCE(sk->sk_rcvbuf); in tcp_rmem_pressure()
1400 return atomic_read(&sk->sk_rmem_alloc) > threshold; in tcp_rmem_pressure()
1407 void tcp_enter_memory_pressure(struct sock *sk);
1408 void tcp_leave_memory_pressure(struct sock *sk);
1439 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time() argument
1441 int fin_timeout = tcp_sk(sk)->linger2 ? : in tcp_fin_time()
1442 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); in tcp_fin_time()
1443 const int rto = inet_csk(sk)->icsk_rto; in tcp_fin_time()
1572 const struct sock *sk, const struct sk_buff *skb);
1573 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1576 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1578 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1582 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1587 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, in tcp_md5_do_lookup() argument
1610 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1612 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1623 void tcp_fastopen_destroy_cipher(struct sock *sk);
1625 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1627 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1628 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1633 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1635 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1646 void tcp_fastopen_active_disable(struct sock *sk);
1647 bool tcp_fastopen_active_should_disable(struct sock *sk);
1648 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1649 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
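
These are the kernel-side Fast Open entry points; for orientation, a hypothetical userspace client exercising the client path (tcp_fastopen_cookie_check() via sendmsg with MSG_FASTOPEN) might look like the sketch below, assuming a listener on 127.0.0.1:8080 and client-side TFO enabled in net.ipv4.tcp_fastopen:

/* Illustrative only: data rides in the SYN when a Fast Open cookie for the
 * destination is already cached; otherwise the kernel falls back to a normal
 * handshake and sends the data afterwards.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in daddr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* assumed listener port */
	};
	const char req[] = "hello";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	inet_pton(AF_INET, "127.0.0.1", &daddr.sin_addr);
	sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
	       (struct sockaddr *)&daddr, sizeof(daddr));
	close(fd);
	return 0;
}
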
1662 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1663 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1682 void tcp_write_queue_purge(struct sock *sk);
1684 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) in tcp_rtx_queue_head() argument
1686 return skb_rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_head()
1689 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) in tcp_rtx_queue_tail() argument
1691 return skb_rb_last(&sk->tcp_rtx_queue); in tcp_rtx_queue_tail()
1694 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) in tcp_write_queue_head() argument
1696 return skb_peek(&sk->sk_write_queue); in tcp_write_queue_head()
1699 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail() argument
1701 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
1704 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ argument
1705 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1707 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head() argument
1709 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
1712 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last() argument
1715 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
1718 static inline bool tcp_write_queue_empty(const struct sock *sk) in tcp_write_queue_empty() argument
1720 return skb_queue_empty(&sk->sk_write_queue); in tcp_write_queue_empty()
1723 static inline bool tcp_rtx_queue_empty(const struct sock *sk) in tcp_rtx_queue_empty() argument
1725 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); in tcp_rtx_queue_empty()
1728 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) in tcp_rtx_and_write_queues_empty() argument
1730 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); in tcp_rtx_and_write_queues_empty()
1733 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) in tcp_check_send_head() argument
1735 if (tcp_write_queue_empty(sk)) in tcp_check_send_head()
1736 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); in tcp_check_send_head()
1739 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in __tcp_add_write_queue_tail() argument
1741 __skb_queue_tail(&sk->sk_write_queue, skb); in __tcp_add_write_queue_tail()
1744 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail() argument
1746 __tcp_add_write_queue_tail(sk, skb); in tcp_add_write_queue_tail()
1749 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
1750 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_add_write_queue_tail()
1756 struct sock *sk) in tcp_insert_write_queue_before() argument
1758 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
1761 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue() argument
1764 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
1769 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink() argument
1772 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
1775 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink_and_free() argument
1778 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_unlink_and_free()
1779 sk_wmem_free_skb(sk, skb); in tcp_rtx_queue_unlink_and_free()
1782 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames() argument
1784 if (tcp_send_head(sk)) { in tcp_push_pending_frames()
1785 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
1787 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); in tcp_push_pending_frames()
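
The send-head and write-queue walk helpers above compose as in this illustrative sketch (demo_unsent_bytes is a hypothetical function, not part of the kernel), which totals bytes that are queued but not yet sent; the caller is assumed to hold the socket lock:

#include <net/tcp.h>

static unsigned int demo_unsent_bytes(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);	/* first unsent skb, or NULL */
	struct sk_buff *tmp;
	unsigned int bytes = 0;

	if (!skb)
		return 0;

	/* Walk sk->sk_write_queue starting at skb; tmp keeps the walk safe
	 * if an entry were unlinked along the way.
	 */
	tcp_for_write_queue_from_safe(skb, tmp, sk) {
		bytes += skb->len;
		if (tcp_skb_is_last(sk, skb))
			break;
	}
	return bytes;
}
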
1806 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack() argument
1808 tcp_sk(sk)->highest_sack = skb_rb_next(skb); in tcp_advance_highest_sack()
1811 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack() argument
1813 return tcp_sk(sk)->highest_sack; in tcp_highest_sack()
1816 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset() argument
1818 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); in tcp_highest_sack_reset()
1822 static inline void tcp_highest_sack_replace(struct sock *sk, in tcp_highest_sack_replace() argument
1826 if (old == tcp_highest_sack(sk)) in tcp_highest_sack_replace()
1827 tcp_sk(sk)->highest_sack = new; in tcp_highest_sack_replace()
1831 static inline bool inet_sk_transparent(const struct sock *sk) in inet_sk_transparent() argument
1833 switch (sk->sk_state) { in inet_sk_transparent()
1835 return inet_twsk(sk)->tw_transparent; in inet_sk_transparent()
1837 return inet_rsk(inet_reqsk(sk))->no_srccheck; in inet_sk_transparent()
1839 return inet_sk(sk)->transparent; in inet_sk_transparent()
1875 void tcp_v4_destroy_sock(struct sock *sk);
1898 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake) in tcp_stream_memory_free() argument
1900 const struct tcp_sock *tp = tcp_sk(sk); in tcp_stream_memory_free()
1911 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1914 struct sock *sk, struct sk_buff *skb);
1919 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
1923 const struct sock *sk,
1925 int (*md5_parse)(struct sock *sk,
1935 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1939 const struct sock *sk,
1949 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1953 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1966 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
1969 tcp_synq_overflow(sk); in cookie_init_sequence()
1970 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); in cookie_init_sequence()
1975 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
1988 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
1989 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
1992 extern bool tcp_rack_mark_lost(struct sock *sk);
1995 extern void tcp_rack_reo_timeout(struct sock *sk);
1996 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
1999 static inline s64 tcp_rto_delta_us(const struct sock *sk) in tcp_rto_delta_us() argument
2001 const struct sk_buff *skb = tcp_rtx_queue_head(sk); in tcp_rto_delta_us()
2002 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rto_delta_us()
2005 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; in tcp_rto_delta_us()
2044 static inline int tcp_inq(struct sock *sk) in tcp_inq() argument
2046 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
2049 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_inq()
2051 } else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_inq()
2059 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_inq()
2087 static inline void tcp_listendrop(const struct sock *sk) in tcp_listendrop() argument
2089 atomic_inc(&((struct sock *)sk)->sk_drops); in tcp_listendrop()
2090 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_listendrop()
2112 int (*init)(struct sock *sk);
2114 void (*release)(struct sock *sk);
2123 int tcp_set_ulp(struct sock *sk, const char *name);
2124 int tcp_set_ulp_id(struct sock *sk, const int ulp);
2126 void tcp_cleanup_ulp(struct sock *sk);
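
The ULP hooks above (init/release, attached via tcp_set_ulp()) are what upper-layer protocols such as kTLS implement. A minimal sketch, assuming a hypothetical "demo_ulp" module registered through tcp_register_ulp()/tcp_unregister_ulp() from this header:

#include <linux/module.h>
#include <net/tcp.h>

static int demo_ulp_init(struct sock *sk)
{
	/* Runs when the ULP is attached to sk, e.g. through the TCP_ULP
	 * socket option; set up any per-socket ULP state here.
	 */
	return 0;
}

static void demo_ulp_release(struct sock *sk)
{
	/* Undo whatever demo_ulp_init() set up. */
}

static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
	.name		= "demo_ulp",
	.owner		= THIS_MODULE,
	.init		= demo_ulp_init,
	.release	= demo_ulp_release,
};

static int __init demo_ulp_register(void)
{
	return tcp_register_ulp(&demo_ulp_ops);
}

static void __exit demo_ulp_unregister(void)
{
	tcp_unregister_ulp(&demo_ulp_ops);
}

module_init(demo_ulp_register);
module_exit(demo_ulp_unregister);
MODULE_LICENSE("GPL");
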
2138 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2144 if (sk_fullsock(sk)) { in tcp_call_bpf()
2146 sock_owned_by_me(sk); in tcp_call_bpf()
2149 sock_ops.sk = sk; in tcp_call_bpf()
2162 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2166 return tcp_call_bpf(sk, op, 2, args); in tcp_call_bpf_2arg()
2169 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2174 return tcp_call_bpf(sk, op, 3, args); in tcp_call_bpf_3arg()
2178 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2183 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2188 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2196 static inline u32 tcp_timeout_init(struct sock *sk) in tcp_timeout_init() argument
2200 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); in tcp_timeout_init()
2207 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) in tcp_rwnd_init_bpf() argument
2211 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); in tcp_rwnd_init_bpf()
2218 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) in tcp_bpf_ca_needs_ecn() argument
2220 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); in tcp_bpf_ca_needs_ecn()
2229 void (*cad)(struct sock *sk, u32 ack_seq));