The Pedigree Project  0.1
tcp.c
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
35 /*
36  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
37  * All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without modification,
40  * are permitted provided that the following conditions are met:
41  *
42  * 1. Redistributions of source code must retain the above copyright notice,
43  * this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright notice,
45  * this list of conditions and the following disclaimer in the documentation
46  * and/or other materials provided with the distribution.
47  * 3. The name of the author may not be used to endorse or promote products
48  * derived from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
51  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
52  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
53  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
54  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
55  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
58  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
59  * OF SUCH DAMAGE.
60  *
61  * This file is part of the lwIP TCP/IP stack.
62  *
63  * Author: Adam Dunkels <adam@sics.se>
64  *
65  */
66 
67 #include "lwip/opt.h"
68 
69 #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
70 
71 #include "lwip/def.h"
72 #include "lwip/mem.h"
73 #include "lwip/memp.h"
74 #include "lwip/tcp.h"
75 #include "lwip/priv/tcp_priv.h"
76 #include "lwip/debug.h"
77 #include "lwip/stats.h"
78 #include "lwip/ip6.h"
79 #include "lwip/ip6_addr.h"
80 #include "lwip/nd6.h"
81 
82 #include <string.h>
83 
84 #ifdef LWIP_HOOK_FILENAME
85 #include LWIP_HOOK_FILENAME
86 #endif
87 
88 #ifndef TCP_LOCAL_PORT_RANGE_START
89 /* From http://www.iana.org/assignments/port-numbers:
90  "The Dynamic and/or Private Ports are those from 49152 through 65535" */
91 #define TCP_LOCAL_PORT_RANGE_START 0xc000
92 #define TCP_LOCAL_PORT_RANGE_END 0xffff
93 #define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & ~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START))
94 #endif
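/* Worked example (illustrative): TCP_ENSURE_LOCAL_PORT_RANGE() folds an
   arbitrary 16-bit value into [0xc000, 0xffff]. For instance, a random value
   of 0x1234 gives (0x1234 & ~0xc000) + 0xc000 = 0x1234 + 0xc000 = 0xd234,
   which lies inside the dynamic/private port range quoted above. */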
95 
96 #if LWIP_TCP_KEEPALIVE
97 #define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl)
98 #define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl)
99 #else /* LWIP_TCP_KEEPALIVE */
100 #define TCP_KEEP_DUR(pcb) TCP_MAXIDLE
101 #define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT
102 #endif /* LWIP_TCP_KEEPALIVE */
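/* Worked example, assuming the stock opt.h defaults (TCP_KEEPIDLE_DEFAULT =
   7200000 ms, TCP_KEEPINTVL_DEFAULT = 75000 ms, TCP_KEEPCNT_DEFAULT = 9, so
   TCP_MAXIDLE = 675000 ms): with SOF_KEEPALIVE set and LWIP_TCP_KEEPALIVE
   disabled, tcp_slowtmr() sends the first keepalive probe after 2 hours of
   inactivity, repeats it every 75 s, and aborts the connection once the idle
   time exceeds keep_idle + TCP_MAXIDLE, i.e. roughly 2 h + 675 s of silence. */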
103 
104 /* As initial send MSS, we use TCP_MSS but limit it to 536. */
105 #if TCP_MSS > 536
106 #define INITIAL_MSS 536
107 #else
108 #define INITIAL_MSS TCP_MSS
109 #endif
110 
111 static const char * const tcp_state_str[] = {
112  "CLOSED",
113  "LISTEN",
114  "SYN_SENT",
115  "SYN_RCVD",
116  "ESTABLISHED",
117  "FIN_WAIT_1",
118  "FIN_WAIT_2",
119  "CLOSE_WAIT",
120  "CLOSING",
121  "LAST_ACK",
122  "TIME_WAIT"
123 };
124 
125 /* last local TCP port */
126 static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START;
127 
128 /* Incremented on every coarse-grained timer tick (typically every 500 ms). */
129 u32_t tcp_ticks;
130 static const u8_t tcp_backoff[13] =
131  { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7};
 132  /* Zero-window probe backoff intervals, in slow-timer (500 ms) ticks */
133 static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 };
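/* Worked example: with a 500 ms slow-timer interval, the tcp_persist_backoff
   values above correspond to 1.5, 3, 6, 12, 24, 48 and 60 seconds between
   successive zero-window probes; tcp_slowtmr() stops advancing the index at
   the last entry, so probing settles at about one probe per minute for as
   long as the remote window stays closed. */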
134 
135 /* The TCP PCB lists. */
136 
138 struct tcp_pcb *tcp_bound_pcbs;
140 union tcp_listen_pcbs_t tcp_listen_pcbs;
143 struct tcp_pcb *tcp_active_pcbs;
145 struct tcp_pcb *tcp_tw_pcbs;
146 
148 struct tcp_pcb ** const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs,
149  &tcp_active_pcbs, &tcp_tw_pcbs};
150 
151 u8_t tcp_active_pcbs_changed;
152 
154 static u8_t tcp_timer;
155 static u8_t tcp_timer_ctr;
156 static u16_t tcp_new_port(void);
157 
158 static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb);
159 
163 void
164 tcp_init(void)
165 {
166 #if LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND)
167  tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND());
168 #endif /* LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) */
169 }
170 
174 void
175 tcp_tmr(void)
176 {
177  /* Call tcp_fasttmr() every 250 ms */
178  tcp_fasttmr();
179 
180  if (++tcp_timer & 1) {
181  /* Call tcp_slowtmr() every 500 ms, i.e., every other timer
182  tcp_tmr() is called. */
183  tcp_slowtmr();
184  }
185 }
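/* Illustrative sketch of driving the TCP timers from a NO_SYS-style main
   loop; example_poll_tcp_timer() is a hypothetical helper, sys_now() would
   need lwip/sys.h, and on ports with an operating system the tcpip_thread's
   timeout machinery normally calls tcp_tmr() instead. */
static void
example_poll_tcp_timer(void)
{
  static u32_t last_tcp_time;
  u32_t now = sys_now();
  if ((u32_t)(now - last_tcp_time) >= TCP_TMR_INTERVAL) {
    last_tcp_time = now;
    /* runs tcp_fasttmr() every call and tcp_slowtmr() every other call */
    tcp_tmr();
  }
}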
186 
187 #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
188 
191 static void
192 tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb)
193 {
194  struct tcp_pcb *pcb;
195  for (pcb = list; pcb != NULL; pcb = pcb->next) {
196  if (pcb->listener == lpcb) {
197  pcb->listener = NULL;
198  }
199  }
200 }
201 #endif
202 
206 static void
207 tcp_listen_closed(struct tcp_pcb *pcb)
208 {
209 #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG
210  size_t i;
211  LWIP_ASSERT("pcb != NULL", pcb != NULL);
212  LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN);
213  for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) {
214  tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen*)pcb);
215  }
216 #endif
217  LWIP_UNUSED_ARG(pcb);
218 }
219 
220 #if TCP_LISTEN_BACKLOG
221 
231 void
232 tcp_backlog_delayed(struct tcp_pcb* pcb)
233 {
234  LWIP_ASSERT("pcb != NULL", pcb != NULL);
235  if ((pcb->flags & TF_BACKLOGPEND) == 0) {
236  if (pcb->listener != NULL) {
237  pcb->listener->accepts_pending++;
238  LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
239  pcb->flags |= TF_BACKLOGPEND;
240  }
241  }
242 }
243 
253 void
254 tcp_backlog_accepted(struct tcp_pcb* pcb)
255 {
256  LWIP_ASSERT("pcb != NULL", pcb != NULL);
257  if ((pcb->flags & TF_BACKLOGPEND) != 0) {
258  if (pcb->listener != NULL) {
259  LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0);
260  pcb->listener->accepts_pending--;
261  pcb->flags &= ~TF_BACKLOGPEND;
262  }
263  }
264 }
265 #endif /* TCP_LISTEN_BACKLOG */
266 
283 static err_t
284 tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data)
285 {
286  if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) {
287  if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) {
288  /* Not all data received by application, send RST to tell the remote
289  side about this. */
290  LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED);
291 
292  /* don't call tcp_abort here: we must not deallocate the pcb since
293  that might not be expected when calling tcp_close */
294  tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
295  pcb->local_port, pcb->remote_port);
296 
297  tcp_pcb_purge(pcb);
298  TCP_RMV_ACTIVE(pcb);
299  if (pcb->state == ESTABLISHED) {
300  /* move to TIME_WAIT since we close actively */
301  pcb->state = TIME_WAIT;
302  TCP_REG(&tcp_tw_pcbs, pcb);
303  } else {
304  /* CLOSE_WAIT: deallocate the pcb since we already sent a RST for it */
305  if (tcp_input_pcb == pcb) {
306  /* prevent using a deallocated pcb: free it from tcp_input later */
307  tcp_trigger_input_pcb_close();
308  } else {
309  memp_free(MEMP_TCP_PCB, pcb);
310  }
311  }
312  return ERR_OK;
313  }
314  }
315 
316  /* - states which free the pcb are handled here,
317  - states which send FIN and change state are handled in tcp_close_shutdown_fin() */
318  switch (pcb->state) {
319  case CLOSED:
320  /* Closing a pcb in the CLOSED state might seem erroneous,
321  * however, it is in this state once allocated and as yet unused
322  * and the user needs some way to free it should the need arise.
323  * Calling tcp_close() with a pcb that has already been closed, (i.e. twice)
324  * or for a pcb that has been used and then entered the CLOSED state
325  * is erroneous, but this should never happen as the pcb has in those cases
326  * been freed, and so any remaining handles are bogus. */
327  if (pcb->local_port != 0) {
328  TCP_RMV(&tcp_bound_pcbs, pcb);
329  }
330  memp_free(MEMP_TCP_PCB, pcb);
331  break;
332  case LISTEN:
333  tcp_listen_closed(pcb);
334  tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb);
335  memp_free(MEMP_TCP_PCB_LISTEN, pcb);
336  break;
337  case SYN_SENT:
338  TCP_PCB_REMOVE_ACTIVE(pcb);
339  memp_free(MEMP_TCP_PCB, pcb);
340  MIB2_STATS_INC(mib2.tcpattemptfails);
341  break;
342  default:
343  return tcp_close_shutdown_fin(pcb);
344  }
345  return ERR_OK;
346 }
347 
348 static err_t
349 tcp_close_shutdown_fin(struct tcp_pcb *pcb)
350 {
351  err_t err;
352  LWIP_ASSERT("pcb != NULL", pcb != NULL);
353 
354  switch (pcb->state) {
355  case SYN_RCVD:
356  err = tcp_send_fin(pcb);
357  if (err == ERR_OK) {
358  tcp_backlog_accepted(pcb);
359  MIB2_STATS_INC(mib2.tcpattemptfails);
360  pcb->state = FIN_WAIT_1;
361  }
362  break;
363  case ESTABLISHED:
364  err = tcp_send_fin(pcb);
365  if (err == ERR_OK) {
366  MIB2_STATS_INC(mib2.tcpestabresets);
367  pcb->state = FIN_WAIT_1;
368  }
369  break;
370  case CLOSE_WAIT:
371  err = tcp_send_fin(pcb);
372  if (err == ERR_OK) {
373  MIB2_STATS_INC(mib2.tcpestabresets);
374  pcb->state = LAST_ACK;
375  }
376  break;
377  default:
378  /* Has already been closed, do nothing. */
379  return ERR_OK;
380  break;
381  }
382 
383  if (err == ERR_OK) {
 384  /* Ideally we would make sure tcp_output() does not fail here, so that
 385  all data has been sent when tcp_close() returns. However, unsent data
 386  is also transmitted from the TCP timer functions, so this is not
 387  strictly required and we ignore the return value of tcp_output()
 388  for now. */
389  tcp_output(pcb);
390  } else if (err == ERR_MEM) {
391  /* Mark this pcb for closing. Closing is retried from tcp_tmr. */
392  pcb->flags |= TF_CLOSEPEND;
393  }
394  return err;
395 }
396 
412 err_t
413 tcp_close(struct tcp_pcb *pcb)
414 {
415  LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
416  tcp_debug_print_state(pcb->state);
417 
418  if (pcb->state != LISTEN) {
419  /* Set a flag not to receive any more data... */
420  pcb->flags |= TF_RXCLOSED;
421  }
422  /* ... and close */
423  return tcp_close_shutdown(pcb, 1);
424 }
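/* Minimal application-side close sequence (illustrative; the example_ name is
   hypothetical). Callbacks are detached first so no further events reach the
   application, and an ERR_MEM result is tolerated because, as shown in
   tcp_close_shutdown_fin() above, the pcb is then flagged TF_CLOSEPEND and
   the close is retried from the TCP timers. */
static err_t
example_close_conn(struct tcp_pcb *tpcb)
{
  err_t err;
  tcp_arg(tpcb, NULL);
  tcp_recv(tpcb, NULL);
  tcp_sent(tpcb, NULL);
  tcp_err(tpcb, NULL);
  err = tcp_close(tpcb);
  if (err == ERR_MEM) {
    /* the FIN could not be enqueued right now; closing continues from
       tcp_tmr(), so the application only has to stop using the pcb */
  }
  return err;
}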
425 
439 err_t
440 tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
441 {
442  if (pcb->state == LISTEN) {
443  return ERR_CONN;
444  }
445  if (shut_rx) {
446  /* shut down the receive side: set a flag not to receive any more data... */
447  pcb->flags |= TF_RXCLOSED;
448  if (shut_tx) {
449  /* shutting down the tx AND rx side is the same as closing for the raw API */
450  return tcp_close_shutdown(pcb, 1);
451  }
452  /* ... and free buffered data */
453  if (pcb->refused_data != NULL) {
454  pbuf_free(pcb->refused_data);
455  pcb->refused_data = NULL;
456  }
457  }
458  if (shut_tx) {
459  /* This can't happen twice since if it succeeds, the pcb's state is changed.
460  Only close in these states as the others directly deallocate the PCB */
461  switch (pcb->state) {
462  case SYN_RCVD:
463  case ESTABLISHED:
464  case CLOSE_WAIT:
465  return tcp_close_shutdown(pcb, (u8_t)shut_rx);
466  default:
467  /* Not (yet?) connected, cannot shutdown the TX side as that would bring us
468  into CLOSED state, where the PCB is deallocated. */
469  return ERR_CONN;
470  }
471  }
472  return ERR_OK;
473 }
474 
483 void
484 tcp_abandon(struct tcp_pcb *pcb, int reset)
485 {
486  u32_t seqno, ackno;
487 #if LWIP_CALLBACK_API
488  tcp_err_fn errf;
489 #endif /* LWIP_CALLBACK_API */
490  void *errf_arg;
491 
492  /* pcb->state LISTEN not allowed here */
493  LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs",
494  pcb->state != LISTEN);
495  /* Figure out on which TCP PCB list we are, and remove us. If we
496  are in an active state, call the receive function associated with
497  the PCB with a NULL argument, and send an RST to the remote end. */
498  if (pcb->state == TIME_WAIT) {
499  tcp_pcb_remove(&tcp_tw_pcbs, pcb);
500  memp_free(MEMP_TCP_PCB, pcb);
501  } else {
502  int send_rst = 0;
503  u16_t local_port = 0;
504  enum tcp_state last_state;
505  seqno = pcb->snd_nxt;
506  ackno = pcb->rcv_nxt;
507 #if LWIP_CALLBACK_API
508  errf = pcb->errf;
509 #endif /* LWIP_CALLBACK_API */
510  errf_arg = pcb->callback_arg;
511  if (pcb->state == CLOSED) {
512  if (pcb->local_port != 0) {
513  /* bound, not yet opened */
514  TCP_RMV(&tcp_bound_pcbs, pcb);
515  }
516  } else {
517  send_rst = reset;
518  local_port = pcb->local_port;
519  TCP_PCB_REMOVE_ACTIVE(pcb);
520  }
521  if (pcb->unacked != NULL) {
522  tcp_segs_free(pcb->unacked);
523  }
524  if (pcb->unsent != NULL) {
525  tcp_segs_free(pcb->unsent);
526  }
527 #if TCP_QUEUE_OOSEQ
528  if (pcb->ooseq != NULL) {
529  tcp_segs_free(pcb->ooseq);
530  }
531 #endif /* TCP_QUEUE_OOSEQ */
532  tcp_backlog_accepted(pcb);
533  if (send_rst) {
534  LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
535  tcp_rst(seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port);
536  }
537  last_state = pcb->state;
538  memp_free(MEMP_TCP_PCB, pcb);
539  TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT);
540  }
541 }
542 
554 void
555 tcp_abort(struct tcp_pcb *pcb)
556 {
557  tcp_abandon(pcb, 1);
558 }
559 
575 err_t
576 tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
577 {
578  int i;
579  int max_pcb_list = NUM_TCP_PCB_LISTS;
580  struct tcp_pcb *cpcb;
581 
582 #if LWIP_IPV4
583  /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
584  if (ipaddr == NULL) {
585  ipaddr = IP4_ADDR_ANY;
586  }
587 #endif /* LWIP_IPV4 */
588 
589  /* still need to check for ipaddr == NULL in IPv6 only case */
590  if ((pcb == NULL) || (ipaddr == NULL)) {
591  return ERR_VAL;
592  }
593 
594  LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL);
595 
596 #if SO_REUSE
597  /* Unless the REUSEADDR flag is set,
598  we have to check the pcbs in TIME-WAIT state, also.
599  We do not dump TIME_WAIT pcb's; they can still be matched by incoming
600  packets using both local and remote IP addresses and ports to distinguish.
601  */
602  if (ip_get_option(pcb, SOF_REUSEADDR)) {
603  max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT;
604  }
605 #endif /* SO_REUSE */
606 
607  if (port == 0) {
608  port = tcp_new_port();
609  if (port == 0) {
610  return ERR_BUF;
611  }
612  } else {
613  /* Check if the address already is in use (on all lists) */
614  for (i = 0; i < max_pcb_list; i++) {
615  for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
616  if (cpcb->local_port == port) {
617 #if SO_REUSE
618  /* Omit checking for the same port if both pcbs have REUSEADDR set.
619  For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in
620  tcp_connect. */
621  if (!ip_get_option(pcb, SOF_REUSEADDR) ||
622  !ip_get_option(cpcb, SOF_REUSEADDR))
623 #endif /* SO_REUSE */
624  {
625  /* @todo: check accept_any_ip_version */
626  if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) &&
627  (ip_addr_isany(&cpcb->local_ip) ||
628  ip_addr_isany(ipaddr) ||
629  ip_addr_cmp(&cpcb->local_ip, ipaddr))) {
630  return ERR_USE;
631  }
632  }
633  }
634  }
635  }
636  }
637 
638  if (!ip_addr_isany(ipaddr)) {
639  ip_addr_set(&pcb->local_ip, ipaddr);
640  }
641  pcb->local_port = port;
642  TCP_REG(&tcp_bound_pcbs, pcb);
643  LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port));
644  return ERR_OK;
645 }
646 #if LWIP_CALLBACK_API
647 
650 static err_t
651 tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err)
652 {
653  LWIP_UNUSED_ARG(arg);
654  LWIP_UNUSED_ARG(err);
655 
656  tcp_abort(pcb);
657 
658  return ERR_ABRT;
659 }
660 #endif /* LWIP_CALLBACK_API */
661 
677 struct tcp_pcb *
678 tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog)
679 {
680  return tcp_listen_with_backlog_and_err(pcb, backlog, NULL);
681 }
682 
699 struct tcp_pcb *
700 tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err)
701 {
702  struct tcp_pcb_listen *lpcb = NULL;
703  err_t res;
704 
705  LWIP_UNUSED_ARG(backlog);
706  LWIP_ERROR("tcp_listen: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done);
707 
708  /* already listening? */
709  if (pcb->state == LISTEN) {
710  lpcb = (struct tcp_pcb_listen*)pcb;
711  res = ERR_ALREADY;
712  goto done;
713  }
714 #if SO_REUSE
715  if (ip_get_option(pcb, SOF_REUSEADDR)) {
716  /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage
717  is declared (listen-/connection-pcb), we have to make sure now that
718  this port is only used once for every local IP. */
719  for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
720  if ((lpcb->local_port == pcb->local_port) &&
721  ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) {
722  /* this address/port is already used */
723  lpcb = NULL;
724  res = ERR_USE;
725  goto done;
726  }
727  }
728  }
729 #endif /* SO_REUSE */
730  lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN);
731  if (lpcb == NULL) {
732  res = ERR_MEM;
733  goto done;
734  }
735  lpcb->callback_arg = pcb->callback_arg;
736  lpcb->local_port = pcb->local_port;
737  lpcb->state = LISTEN;
738  lpcb->prio = pcb->prio;
739  lpcb->so_options = pcb->so_options;
740  lpcb->ttl = pcb->ttl;
741  lpcb->tos = pcb->tos;
742 #if LWIP_IPV4 && LWIP_IPV6
743  IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type);
744 #endif /* LWIP_IPV4 && LWIP_IPV6 */
745  ip_addr_copy(lpcb->local_ip, pcb->local_ip);
746  if (pcb->local_port != 0) {
747  TCP_RMV(&tcp_bound_pcbs, pcb);
748  }
749  memp_free(MEMP_TCP_PCB, pcb);
750 #if LWIP_CALLBACK_API
751  lpcb->accept = tcp_accept_null;
752 #endif /* LWIP_CALLBACK_API */
753 #if TCP_LISTEN_BACKLOG
754  lpcb->accepts_pending = 0;
755  tcp_backlog_set(lpcb, backlog);
756 #endif /* TCP_LISTEN_BACKLOG */
757  TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb);
758  res = ERR_OK;
759 done:
760  if (err != NULL) {
761  *err = res;
762  }
763  return (struct tcp_pcb *)lpcb;
764 }
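/* Illustrative listener setup for the raw API (LWIP_CALLBACK_API assumed; all
   example_ names are hypothetical). Note that tcp_listen_with_backlog()
   deallocates the original pcb and returns a smaller listen pcb, so only the
   returned pointer may be used afterwards. */
static err_t
example_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  /* register per-connection callbacks on newpcb here
     (tcp_arg()/tcp_recv()/tcp_sent()/tcp_err()) */
  return ERR_OK;
}

static struct tcp_pcb *
example_start_listener(u16_t port)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb == NULL) {
    return NULL;
  }
  if (tcp_bind(pcb, IP_ADDR_ANY, port) != ERR_OK) {
    tcp_abort(pcb);   /* frees the unused pcb again */
    return NULL;
  }
  pcb = tcp_listen_with_backlog(pcb, 1);
  if (pcb != NULL) {
    tcp_accept(pcb, example_accept);
  }
  return pcb;
}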
765 
772 u32_t
773 tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb)
774 {
775  u32_t new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd;
776 
777  if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) {
778  /* we can advertise more window */
779  pcb->rcv_ann_wnd = pcb->rcv_wnd;
780  return new_right_edge - pcb->rcv_ann_right_edge;
781  } else {
782  if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) {
783  /* Can happen due to other end sending out of advertised window,
784  * but within actual available (but not yet advertised) window */
785  pcb->rcv_ann_wnd = 0;
786  } else {
787  /* keep the right edge of window constant */
788  u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt;
789 #if !LWIP_WND_SCALE
790  LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff);
791 #endif
792  pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd;
793  }
794  return 0;
795  }
796 }
797 
807 void
808 tcp_recved(struct tcp_pcb *pcb, u16_t len)
809 {
810  int wnd_inflation;
811 
812  /* pcb->state LISTEN not allowed here */
813  LWIP_ASSERT("don't call tcp_recved for listen-pcbs",
814  pcb->state != LISTEN);
815 
816  pcb->rcv_wnd += len;
817  if (pcb->rcv_wnd > TCP_WND_MAX(pcb)) {
818  pcb->rcv_wnd = TCP_WND_MAX(pcb);
819  } else if (pcb->rcv_wnd == 0) {
820  /* rcv_wnd overflowed */
821  if ((pcb->state == CLOSE_WAIT) || (pcb->state == LAST_ACK)) {
 822  /* In passive close we allow this, because the FIN bit is added to rcv_wnd
 823  by the stack itself: an application is not required to call tcp_recved()
 824  for the FIN bit, although e.g. the netconn API does so. */
825  pcb->rcv_wnd = TCP_WND_MAX(pcb);
826  } else {
827  LWIP_ASSERT("tcp_recved: len wrapped rcv_wnd\n", 0);
828  }
829  }
830 
831  wnd_inflation = tcp_update_rcv_ann_wnd(pcb);
832 
833  /* If the change in the right edge of window is significant (default
834  * watermark is TCP_WND/4), then send an explicit update now.
835  * Otherwise wait for a packet to be sent in the normal course of
836  * events (or more window to be available later) */
837  if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) {
838  tcp_ack_now(pcb);
839  tcp_output(pcb);
840  }
841 
842  LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n",
843  len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd)));
844 }
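/* Illustrative receive callback (hypothetical example_ name) showing the
   expected pairing of pbuf consumption with tcp_recved(): the receive window
   is only re-opened once the application has actually processed the data. */
static err_t
example_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p == NULL) {
    /* the remote side closed the connection */
    return tcp_close(tpcb);
  }
  if (err != ERR_OK) {
    /* an error was signalled together with data: drop the pbuf */
    pbuf_free(p);
    return err;
  }
  /* ... process p->payload / p->tot_len here ... */
  tcp_recved(tpcb, p->tot_len);
  pbuf_free(p);
  return ERR_OK;
}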
845 
851 static u16_t
852 tcp_new_port(void)
853 {
854  u8_t i;
855  u16_t n = 0;
856  struct tcp_pcb *pcb;
857 
858 again:
859  if (tcp_port++ == TCP_LOCAL_PORT_RANGE_END) {
860  tcp_port = TCP_LOCAL_PORT_RANGE_START;
861  }
862  /* Check all PCB lists. */
863  for (i = 0; i < NUM_TCP_PCB_LISTS; i++) {
864  for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) {
865  if (pcb->local_port == tcp_port) {
866  if (++n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) {
867  return 0;
868  }
869  goto again;
870  }
871  }
872  }
873  return tcp_port;
874 }
875 
890 err_t
891 tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port,
892  tcp_connected_fn connected)
893 {
894  err_t ret;
895  u32_t iss;
896  u16_t old_local_port;
897 
898  if ((pcb == NULL) || (ipaddr == NULL)) {
899  return ERR_VAL;
900  }
901 
902  LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN);
903 
904  LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port));
905  ip_addr_set(&pcb->remote_ip, ipaddr);
906  pcb->remote_port = port;
907 
908  /* check if we have a route to the remote host */
909  if (ip_addr_isany(&pcb->local_ip)) {
910  /* no local IP address set, yet. */
911  struct netif *netif;
912  const ip_addr_t *local_ip;
913  ip_route_get_local_ip(&pcb->local_ip, &pcb->remote_ip, netif, local_ip);
914  if ((netif == NULL) || (local_ip == NULL)) {
915  /* Don't even try to send a SYN packet if we have no route
916  since that will fail. */
917  return ERR_RTE;
918  }
919  /* Use the address as local address of the pcb. */
920  ip_addr_copy(pcb->local_ip, *local_ip);
921  }
922 
923  old_local_port = pcb->local_port;
924  if (pcb->local_port == 0) {
925  pcb->local_port = tcp_new_port();
926  if (pcb->local_port == 0) {
927  return ERR_BUF;
928  }
929  } else {
930 #if SO_REUSE
931  if (ip_get_option(pcb, SOF_REUSEADDR)) {
932  /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure
933  now that the 5-tuple is unique. */
934  struct tcp_pcb *cpcb;
935  int i;
936  /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */
937  for (i = 2; i < NUM_TCP_PCB_LISTS; i++) {
938  for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
939  if ((cpcb->local_port == pcb->local_port) &&
940  (cpcb->remote_port == port) &&
941  ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) &&
942  ip_addr_cmp(&cpcb->remote_ip, ipaddr)) {
943  /* linux returns EISCONN here, but ERR_USE should be OK for us */
944  return ERR_USE;
945  }
946  }
947  }
948  }
949 #endif /* SO_REUSE */
950  }
951 
952  iss = tcp_next_iss(pcb);
953  pcb->rcv_nxt = 0;
954  pcb->snd_nxt = iss;
955  pcb->lastack = iss - 1;
956  pcb->snd_wl2 = iss - 1;
957  pcb->snd_lbb = iss - 1;
958  /* Start with a window that does not need scaling. When window scaling is
959  enabled and used, the window is enlarged when both sides agree on scaling. */
960  pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
961  pcb->rcv_ann_right_edge = pcb->rcv_nxt;
962  pcb->snd_wnd = TCP_WND;
963  /* As initial send MSS, we use TCP_MSS but limit it to 536.
964  The send MSS is updated when an MSS option is received. */
965  pcb->mss = INITIAL_MSS;
966 #if TCP_CALCULATE_EFF_SEND_MSS
967  pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip);
968 #endif /* TCP_CALCULATE_EFF_SEND_MSS */
969  pcb->cwnd = 1;
970 #if LWIP_CALLBACK_API
971  pcb->connected = connected;
972 #else /* LWIP_CALLBACK_API */
973  LWIP_UNUSED_ARG(connected);
974 #endif /* LWIP_CALLBACK_API */
975 
976  /* Send a SYN together with the MSS option. */
977  ret = tcp_enqueue_flags(pcb, TCP_SYN);
978  if (ret == ERR_OK) {
 979  /* SYN segment was enqueued, change the pcb's state now */
980  pcb->state = SYN_SENT;
981  if (old_local_port != 0) {
982  TCP_RMV(&tcp_bound_pcbs, pcb);
983  }
984  TCP_REG_ACTIVE(pcb);
985  MIB2_STATS_INC(mib2.tcpactiveopens);
986 
987  tcp_output(pcb);
988  }
989  return ret;
990 }
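/* Illustrative active open (hypothetical example_ names). The connected
   callback is currently only invoked with err == ERR_OK; handshake failures
   are reported through the error callback instead. */
static err_t
example_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  /* handshake finished: queue the first request */
  if (tcp_write(tpcb, "hello", 5, TCP_WRITE_FLAG_COPY) == ERR_OK) {
    tcp_output(tpcb);   /* push it out without waiting for more data */
  }
  return ERR_OK;
}

static err_t
example_start_connect(const ip_addr_t *server_ip, u16_t server_port)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb == NULL) {
    return ERR_MEM;
  }
  /* install tcp_arg()/tcp_recv()/tcp_err() handlers here, before connecting */
  return tcp_connect(pcb, server_ip, server_port, example_connected);
}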
991 
999 void
1000 tcp_slowtmr(void)
1001 {
1002  struct tcp_pcb *pcb, *prev;
1003  tcpwnd_size_t eff_wnd;
1004  u8_t pcb_remove; /* flag if a PCB should be removed */
1005  u8_t pcb_reset; /* flag if a RST should be sent when removing */
1006  err_t err;
1007 
1008  err = ERR_OK;
1009 
1010  ++tcp_ticks;
1011  ++tcp_timer_ctr;
1012 
1013 tcp_slowtmr_start:
1014  /* Steps through all of the active PCBs. */
1015  prev = NULL;
1016  pcb = tcp_active_pcbs;
1017  if (pcb == NULL) {
1018  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n"));
1019  }
1020  while (pcb != NULL) {
1021  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n"));
1022  LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED);
1023  LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN);
1024  LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT);
1025  if (pcb->last_timer == tcp_timer_ctr) {
1026  /* skip this pcb, we have already processed it */
1027  pcb = pcb->next;
1028  continue;
1029  }
1030  pcb->last_timer = tcp_timer_ctr;
1031 
1032  pcb_remove = 0;
1033  pcb_reset = 0;
1034 
1035  if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) {
1036  ++pcb_remove;
1037  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n"));
1038  }
1039  else if (pcb->nrtx >= TCP_MAXRTX) {
1040  ++pcb_remove;
1041  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n"));
1042  } else {
1043  if (pcb->persist_backoff > 0) {
1044  /* If snd_wnd is zero, use persist timer to send 1 byte probes
1045  * instead of using the standard retransmission mechanism. */
1046  u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff-1];
1047  if (pcb->persist_cnt < backoff_cnt) {
1048  pcb->persist_cnt++;
1049  }
1050  if (pcb->persist_cnt >= backoff_cnt) {
1051  if (tcp_zero_window_probe(pcb) == ERR_OK) {
1052  pcb->persist_cnt = 0;
1053  if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) {
1054  pcb->persist_backoff++;
1055  }
1056  }
1057  }
1058  } else {
1059  /* Increase the retransmission timer if it is running */
1060  if (pcb->rtime >= 0) {
1061  ++pcb->rtime;
1062  }
1063 
1064  if (pcb->unacked != NULL && pcb->rtime >= pcb->rto) {
1065  /* Time for a retransmission. */
1066  LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F
1067  " pcb->rto %"S16_F"\n",
1068  pcb->rtime, pcb->rto));
1069 
1070  /* Double retransmission time-out unless we are trying to
1071  * connect to somebody (i.e., we are in SYN_SENT). */
1072  if (pcb->state != SYN_SENT) {
1073  u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff)-1);
1074  pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx];
1075  }
1076 
1077  /* Reset the retransmission timer. */
1078  pcb->rtime = 0;
1079 
1080  /* Reduce congestion window and ssthresh. */
1081  eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd);
1082  pcb->ssthresh = eff_wnd >> 1;
1083  if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) {
1084  pcb->ssthresh = (pcb->mss << 1);
1085  }
1086  pcb->cwnd = pcb->mss;
1087  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F
1088  " ssthresh %"TCPWNDSIZE_F"\n",
1089  pcb->cwnd, pcb->ssthresh));
1090 
1091  /* The following needs to be called AFTER cwnd is set to one
1092  mss - STJ */
1093  tcp_rexmit_rto(pcb);
1094  }
1095  }
1096  }
1097  /* Check if this PCB has stayed too long in FIN-WAIT-2 */
1098  if (pcb->state == FIN_WAIT_2) {
1099  /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */
1100  if (pcb->flags & TF_RXCLOSED) {
1101  /* PCB was fully closed (either through close() or SHUT_RDWR):
1102  normal FIN-WAIT timeout handling. */
1103  if ((u32_t)(tcp_ticks - pcb->tmr) >
1104  TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) {
1105  ++pcb_remove;
1106  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n"));
1107  }
1108  }
1109  }
1110 
1111  /* Check if KEEPALIVE should be sent */
1112  if (ip_get_option(pcb, SOF_KEEPALIVE) &&
1113  ((pcb->state == ESTABLISHED) ||
1114  (pcb->state == CLOSE_WAIT))) {
1115  if ((u32_t)(tcp_ticks - pcb->tmr) >
1116  (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL)
1117  {
1118  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to "));
1119  ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip);
1120  LWIP_DEBUGF(TCP_DEBUG, ("\n"));
1121 
1122  ++pcb_remove;
1123  ++pcb_reset;
1124  } else if ((u32_t)(tcp_ticks - pcb->tmr) >
1125  (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb))
1126  / TCP_SLOW_INTERVAL)
1127  {
1128  err = tcp_keepalive(pcb);
1129  if (err == ERR_OK) {
1130  pcb->keep_cnt_sent++;
1131  }
1132  }
1133  }
1134 
 1135  /* If this PCB has queued out-of-sequence data but has been
 1136  inactive for too long, drop that data (it will eventually
 1137  be retransmitted). */
1138 #if TCP_QUEUE_OOSEQ
1139  if (pcb->ooseq != NULL &&
1140  (u32_t)tcp_ticks - pcb->tmr >= pcb->rto * TCP_OOSEQ_TIMEOUT) {
1141  tcp_segs_free(pcb->ooseq);
1142  pcb->ooseq = NULL;
1143  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n"));
1144  }
1145 #endif /* TCP_QUEUE_OOSEQ */
1146 
1147  /* Check if this PCB has stayed too long in SYN-RCVD */
1148  if (pcb->state == SYN_RCVD) {
1149  if ((u32_t)(tcp_ticks - pcb->tmr) >
1150  TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) {
1151  ++pcb_remove;
1152  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n"));
1153  }
1154  }
1155 
1156  /* Check if this PCB has stayed too long in LAST-ACK */
1157  if (pcb->state == LAST_ACK) {
1158  if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
1159  ++pcb_remove;
1160  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n"));
1161  }
1162  }
1163 
1164  /* If the PCB should be removed, do it. */
1165  if (pcb_remove) {
1166  struct tcp_pcb *pcb2;
1167 #if LWIP_CALLBACK_API
1168  tcp_err_fn err_fn = pcb->errf;
1169 #endif /* LWIP_CALLBACK_API */
1170  void *err_arg;
1171  enum tcp_state last_state;
1172  tcp_pcb_purge(pcb);
1173  /* Remove PCB from tcp_active_pcbs list. */
1174  if (prev != NULL) {
1175  LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs);
1176  prev->next = pcb->next;
1177  } else {
1178  /* This PCB was the first. */
1179  LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb);
1180  tcp_active_pcbs = pcb->next;
1181  }
1182 
1183  if (pcb_reset) {
1184  tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
1185  pcb->local_port, pcb->remote_port);
1186  }
1187 
1188  err_arg = pcb->callback_arg;
1189  last_state = pcb->state;
1190  pcb2 = pcb;
1191  pcb = pcb->next;
1192  memp_free(MEMP_TCP_PCB, pcb2);
1193 
1194  tcp_active_pcbs_changed = 0;
1195  TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT);
1196  if (tcp_active_pcbs_changed) {
1197  goto tcp_slowtmr_start;
1198  }
1199  } else {
1200  /* get the 'next' element now and work with 'prev' below (in case of abort) */
1201  prev = pcb;
1202  pcb = pcb->next;
1203 
1204  /* We check if we should poll the connection. */
1205  ++prev->polltmr;
1206  if (prev->polltmr >= prev->pollinterval) {
1207  prev->polltmr = 0;
1208  LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n"));
1209  tcp_active_pcbs_changed = 0;
1210  TCP_EVENT_POLL(prev, err);
1211  if (tcp_active_pcbs_changed) {
1212  goto tcp_slowtmr_start;
1213  }
1214  /* if err == ERR_ABRT, 'prev' is already deallocated */
1215  if (err == ERR_OK) {
1216  tcp_output(prev);
1217  }
1218  }
1219  }
1220  }
1221 
1222 
1223  /* Steps through all of the TIME-WAIT PCBs. */
1224  prev = NULL;
1225  pcb = tcp_tw_pcbs;
1226  while (pcb != NULL) {
1227  LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
1228  pcb_remove = 0;
1229 
1230  /* Check if this PCB has stayed long enough in TIME-WAIT */
1231  if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
1232  ++pcb_remove;
1233  }
1234 
1235  /* If the PCB should be removed, do it. */
1236  if (pcb_remove) {
1237  struct tcp_pcb *pcb2;
1238  tcp_pcb_purge(pcb);
1239  /* Remove PCB from tcp_tw_pcbs list. */
1240  if (prev != NULL) {
1241  LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs);
1242  prev->next = pcb->next;
1243  } else {
1244  /* This PCB was the first. */
1245  LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb);
1246  tcp_tw_pcbs = pcb->next;
1247  }
1248  pcb2 = pcb;
1249  pcb = pcb->next;
1250  memp_free(MEMP_TCP_PCB, pcb2);
1251  } else {
1252  prev = pcb;
1253  pcb = pcb->next;
1254  }
1255  }
1256 }
1257 
1264 void
1265 tcp_fasttmr(void)
1266 {
1267  struct tcp_pcb *pcb;
1268 
1269  ++tcp_timer_ctr;
1270 
1271 tcp_fasttmr_start:
1272  pcb = tcp_active_pcbs;
1273 
1274  while (pcb != NULL) {
1275  if (pcb->last_timer != tcp_timer_ctr) {
1276  struct tcp_pcb *next;
1277  pcb->last_timer = tcp_timer_ctr;
1278  /* send delayed ACKs */
1279  if (pcb->flags & TF_ACK_DELAY) {
1280  LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n"));
1281  tcp_ack_now(pcb);
1282  tcp_output(pcb);
1283  pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
1284  }
1285  /* send pending FIN */
1286  if (pcb->flags & TF_CLOSEPEND) {
1287  LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n"));
1288  pcb->flags &= ~(TF_CLOSEPEND);
1289  tcp_close_shutdown_fin(pcb);
1290  }
1291 
1292  next = pcb->next;
1293 
1294  /* If there is data which was previously "refused" by upper layer */
1295  if (pcb->refused_data != NULL) {
1296  tcp_active_pcbs_changed = 0;
1297  tcp_process_refused_data(pcb);
1298  if (tcp_active_pcbs_changed) {
1299  /* application callback has changed the pcb list: restart the loop */
1300  goto tcp_fasttmr_start;
1301  }
1302  }
1303  pcb = next;
1304  } else {
1305  pcb = pcb->next;
1306  }
1307  }
1308 }
1309 
1311 void
1312 tcp_txnow(void)
1313 {
1314  struct tcp_pcb *pcb;
1315 
1316  for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
1317  if (pcb->flags & TF_NAGLEMEMERR) {
1318  tcp_output(pcb);
1319  }
1320  }
1321 }
1322 
1324 err_t
1325 tcp_process_refused_data(struct tcp_pcb *pcb)
1326 {
1327 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1328  struct pbuf *rest;
1329  while (pcb->refused_data != NULL)
1330 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1331  {
1332  err_t err;
1333  u8_t refused_flags = pcb->refused_data->flags;
1334  /* set pcb->refused_data to NULL in case the callback frees it and then
1335  closes the pcb */
1336  struct pbuf *refused_data = pcb->refused_data;
1337 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1338  pbuf_split_64k(refused_data, &rest);
1339  pcb->refused_data = rest;
1340 #else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1341  pcb->refused_data = NULL;
1342 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1343  /* Notify again application with data previously received. */
1344  LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n"));
1345  TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err);
1346  if (err == ERR_OK) {
1347  /* did refused_data include a FIN? */
1348  if (refused_flags & PBUF_FLAG_TCP_FIN
1349 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1350  && (rest == NULL)
1351 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1352  ) {
1353  /* correct rcv_wnd as the application won't call tcp_recved()
1354  for the FIN's seqno */
1355  if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) {
1356  pcb->rcv_wnd++;
1357  }
1358  TCP_EVENT_CLOSED(pcb, err);
1359  if (err == ERR_ABRT) {
1360  return ERR_ABRT;
1361  }
1362  }
1363  } else if (err == ERR_ABRT) {
1364  /* if err == ERR_ABRT, 'pcb' is already deallocated */
1365  /* Drop incoming packets because pcb is "full" (only if the incoming
1366  segment contains data). */
1367  LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n"));
1368  return ERR_ABRT;
1369  } else {
1370  /* data is still refused, pbuf is still valid (go on for ACK-only packets) */
1371 #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1372  if (rest != NULL) {
1373  pbuf_cat(refused_data, rest);
1374  }
1375 #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1376  pcb->refused_data = refused_data;
1377  return ERR_INPROGRESS;
1378  }
1379  }
1380  return ERR_OK;
1381 }
1382 
1388 void
1389 tcp_segs_free(struct tcp_seg *seg)
1390 {
1391  while (seg != NULL) {
1392  struct tcp_seg *next = seg->next;
1393  tcp_seg_free(seg);
1394  seg = next;
1395  }
1396 }
1397 
1403 void
1404 tcp_seg_free(struct tcp_seg *seg)
1405 {
1406  if (seg != NULL) {
1407  if (seg->p != NULL) {
1408  pbuf_free(seg->p);
1409 #if TCP_DEBUG
1410  seg->p = NULL;
1411 #endif /* TCP_DEBUG */
1412  }
1413  memp_free(MEMP_TCP_SEG, seg);
1414  }
1415 }
1416 
1423 void
1424 tcp_setprio(struct tcp_pcb *pcb, u8_t prio)
1425 {
1426  pcb->prio = prio;
1427 }
1428 
1429 #if TCP_QUEUE_OOSEQ
1430 
1437 struct tcp_seg *
1438 tcp_seg_copy(struct tcp_seg *seg)
1439 {
1440  struct tcp_seg *cseg;
1441 
1442  cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG);
1443  if (cseg == NULL) {
1444  return NULL;
1445  }
1446  SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg));
1447  pbuf_ref(cseg->p);
1448  return cseg;
1449 }
1450 #endif /* TCP_QUEUE_OOSEQ */
1451 
1452 #if LWIP_CALLBACK_API
1453 
1457 err_t
1458 tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
1459 {
1460  LWIP_UNUSED_ARG(arg);
1461  if (p != NULL) {
1462  tcp_recved(pcb, p->tot_len);
1463  pbuf_free(p);
1464  } else if (err == ERR_OK) {
1465  return tcp_close(pcb);
1466  }
1467  return ERR_OK;
1468 }
1469 #endif /* LWIP_CALLBACK_API */
1470 
1477 static void
1478 tcp_kill_prio(u8_t prio)
1479 {
1480  struct tcp_pcb *pcb, *inactive;
1481  u32_t inactivity;
1482  u8_t mprio;
1483 
1484  mprio = LWIP_MIN(TCP_PRIO_MAX, prio);
1485 
1486  /* We kill the oldest active connection that has lower priority than prio. */
1487  inactivity = 0;
1488  inactive = NULL;
1489  for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
1490  if (pcb->prio <= mprio &&
1491  (u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
1492  inactivity = tcp_ticks - pcb->tmr;
1493  inactive = pcb;
1494  mprio = pcb->prio;
1495  }
1496  }
1497  if (inactive != NULL) {
1498  LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n",
1499  (void *)inactive, inactivity));
1500  tcp_abort(inactive);
1501  }
1502 }
1503 
1508 static void
1509 tcp_kill_state(enum tcp_state state)
1510 {
1511  struct tcp_pcb *pcb, *inactive;
1512  u32_t inactivity;
1513 
1514  LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK));
1515 
1516  inactivity = 0;
1517  inactive = NULL;
1518  /* Go through the list of active pcbs and get the oldest pcb that is in state
1519  CLOSING/LAST_ACK. */
1520  for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
1521  if (pcb->state == state) {
1522  if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
1523  inactivity = tcp_ticks - pcb->tmr;
1524  inactive = pcb;
1525  }
1526  }
1527  }
1528  if (inactive != NULL) {
1529  LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n",
1530  tcp_state_str[state], (void *)inactive, inactivity));
1531  /* Don't send a RST, since no data is lost. */
1532  tcp_abandon(inactive, 0);
1533  }
1534 }
1535 
1540 static void
1541 tcp_kill_timewait(void)
1542 {
1543  struct tcp_pcb *pcb, *inactive;
1544  u32_t inactivity;
1545 
1546  inactivity = 0;
1547  inactive = NULL;
1548  /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */
1549  for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
1550  if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
1551  inactivity = tcp_ticks - pcb->tmr;
1552  inactive = pcb;
1553  }
1554  }
1555  if (inactive != NULL) {
1556  LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n",
1557  (void *)inactive, inactivity));
1558  tcp_abort(inactive);
1559  }
1560 }
1561 
1568 struct tcp_pcb *
1569 tcp_alloc(u8_t prio)
1570 {
1571  struct tcp_pcb *pcb;
1572 
1573  pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
1574  if (pcb == NULL) {
1575  /* Try killing oldest connection in TIME-WAIT. */
1576  LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n"));
1577  tcp_kill_timewait();
1578  /* Try to allocate a tcp_pcb again. */
1579  pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
1580  if (pcb == NULL) {
1581  /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */
1582  LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n"));
1583  tcp_kill_state(LAST_ACK);
1584  /* Try to allocate a tcp_pcb again. */
1585  pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
1586  if (pcb == NULL) {
1587  /* Try killing oldest connection in CLOSING. */
1588  LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n"));
1589  tcp_kill_state(CLOSING);
1590  /* Try to allocate a tcp_pcb again. */
1591  pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
1592  if (pcb == NULL) {
1593  /* Try killing active connections with lower priority than the new one. */
1594  LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing connection with prio lower than %d\n", prio));
1595  tcp_kill_prio(prio);
1596  /* Try to allocate a tcp_pcb again. */
1597  pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB);
1598  if (pcb != NULL) {
1599  /* adjust err stats: memp_malloc failed multiple times before */
1600  MEMP_STATS_DEC(err, MEMP_TCP_PCB);
1601  }
1602  }
1603  if (pcb != NULL) {
1604  /* adjust err stats: memp_malloc failed multiple times before */
1605  MEMP_STATS_DEC(err, MEMP_TCP_PCB);
1606  }
1607  }
1608  if (pcb != NULL) {
1609  /* adjust err stats: memp_malloc failed multiple times before */
1610  MEMP_STATS_DEC(err, MEMP_TCP_PCB);
1611  }
1612  }
1613  if (pcb != NULL) {
1614  /* adjust err stats: memp_malloc failed above */
1615  MEMP_STATS_DEC(err, MEMP_TCP_PCB);
1616  }
1617  }
1618  if (pcb != NULL) {
1619  /* zero out the whole pcb, so there is no need to initialize members to zero */
1620  memset(pcb, 0, sizeof(struct tcp_pcb));
1621  pcb->prio = prio;
1622  pcb->snd_buf = TCP_SND_BUF;
1623  /* Start with a window that does not need scaling. When window scaling is
1624  enabled and used, the window is enlarged when both sides agree on scaling. */
1625  pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND);
1626  pcb->ttl = TCP_TTL;
1627  /* As initial send MSS, we use TCP_MSS but limit it to 536.
1628  The send MSS is updated when an MSS option is received. */
1629  pcb->mss = INITIAL_MSS;
1630  pcb->rto = 3000 / TCP_SLOW_INTERVAL;
1631  pcb->sv = 3000 / TCP_SLOW_INTERVAL;
1632  pcb->rtime = -1;
1633  pcb->cwnd = 1;
1634  pcb->tmr = tcp_ticks;
1635  pcb->last_timer = tcp_timer_ctr;
1636 
 1637  /* RFC 5681 recommends setting ssthresh arbitrarily high and gives an example
1638  of using the largest advertised receive window. We've seen complications with
1639  receiving TCPs that use window scaling and/or window auto-tuning where the
1640  initial advertised window is very small and then grows rapidly once the
1641  connection is established. To avoid these complications, we set ssthresh to the
1642  largest effective cwnd (amount of in-flight data) that the sender can have. */
1643  pcb->ssthresh = TCP_SND_BUF;
1644 
1645 #if LWIP_CALLBACK_API
1646  pcb->recv = tcp_recv_null;
1647 #endif /* LWIP_CALLBACK_API */
1648 
1649  /* Init KEEPALIVE timer */
1650  pcb->keep_idle = TCP_KEEPIDLE_DEFAULT;
1651 
1652 #if LWIP_TCP_KEEPALIVE
1653  pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT;
1654  pcb->keep_cnt = TCP_KEEPCNT_DEFAULT;
1655 #endif /* LWIP_TCP_KEEPALIVE */
1656  }
1657  return pcb;
1658 }
1659 
1673 struct tcp_pcb *
1674 tcp_new(void)
1675 {
1676  return tcp_alloc(TCP_PRIO_NORMAL);
1677 }
1678 
1690 struct tcp_pcb *
1691 tcp_new_ip_type(u8_t type)
1692 {
1693  struct tcp_pcb * pcb;
1694  pcb = tcp_alloc(TCP_PRIO_NORMAL);
1695 #if LWIP_IPV4 && LWIP_IPV6
1696  if (pcb != NULL) {
1697  IP_SET_TYPE_VAL(pcb->local_ip, type);
1698  IP_SET_TYPE_VAL(pcb->remote_ip, type);
1699  }
1700 #else
1701  LWIP_UNUSED_ARG(type);
1702 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1703  return pcb;
1704 }
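/* Illustrative use: with dual-stack enabled, tcp_new_ip_type(IPADDR_TYPE_ANY)
   creates a pcb that can be bound and listened on for both IPv4 and IPv6
   peers, whereas tcp_new() yields an IPv4-typed pcb (the zeroed pcb defaults
   to the IPv4 address type). */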
1705 
1714 void
1715 tcp_arg(struct tcp_pcb *pcb, void *arg)
1716 {
1717  /* This function is allowed to be called for both listen pcbs and
1718  connection pcbs. */
1719  if (pcb != NULL) {
1720  pcb->callback_arg = arg;
1721  }
1722 }
1723 #if LWIP_CALLBACK_API
1724 
1733 void
1734 tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv)
1735 {
1736  if (pcb != NULL) {
1737  LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN);
1738  pcb->recv = recv;
1739  }
1740 }
1741 
1750 void
1751 tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent)
1752 {
1753  if (pcb != NULL) {
1754  LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN);
1755  pcb->sent = sent;
1756  }
1757 }
1758 
1770 void
1771 tcp_err(struct tcp_pcb *pcb, tcp_err_fn err)
1772 {
1773  if (pcb != NULL) {
1774  LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN);
1775  pcb->errf = err;
1776  }
1777 }
1778 
1788 void
1789 tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept)
1790 {
1791  if ((pcb != NULL) && (pcb->state == LISTEN)) {
1792  struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen*)pcb;
1793  lpcb->accept = accept;
1794  }
1795 }
1796 #endif /* LWIP_CALLBACK_API */
1797 
1798 
1806 void
1807 tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval)
1808 {
1809  LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN);
1810 #if LWIP_CALLBACK_API
1811  pcb->poll = poll;
1812 #else /* LWIP_CALLBACK_API */
1813  LWIP_UNUSED_ARG(poll);
1814 #endif /* LWIP_CALLBACK_API */
1815  pcb->pollinterval = interval;
1816 }
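/* Illustrative per-connection callback wiring (all example_ names are
   hypothetical; the state passed to tcp_arg() is assumed to be heap-allocated
   with mem_malloc()). The poll callback is a convenient place to retry work
   that previously failed with ERR_MEM, since it fires from tcp_slowtmr()
   every 'interval' coarse-grained ticks (about interval * 500 ms). */
static err_t
example_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(tpcb);
  LWIP_UNUSED_ARG(len);   /* len bytes were acknowledged by the remote side */
  return ERR_OK;
}

static err_t
example_poll(void *arg, struct tcp_pcb *tpcb)
{
  LWIP_UNUSED_ARG(arg);
  return tcp_output(tpcb);   /* flush anything that is still queued */
}

static void
example_errf(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(err);
  /* the pcb has already been freed when this runs: only release 'arg' here */
  mem_free(arg);
}

static void
example_setup_callbacks(struct tcp_pcb *tpcb, void *conn_state)
{
  tcp_arg(tpcb, conn_state);
  tcp_sent(tpcb, example_sent);
  tcp_err(tpcb, example_errf);
  tcp_poll(tpcb, example_poll, 4);   /* roughly every 2 seconds */
}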
1817 
1824 void
1825 tcp_pcb_purge(struct tcp_pcb *pcb)
1826 {
1827  if (pcb->state != CLOSED &&
1828  pcb->state != TIME_WAIT &&
1829  pcb->state != LISTEN) {
1830 
1831  LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n"));
1832 
1833  tcp_backlog_accepted(pcb);
1834 
1835  if (pcb->refused_data != NULL) {
1836  LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n"));
1837  pbuf_free(pcb->refused_data);
1838  pcb->refused_data = NULL;
1839  }
1840  if (pcb->unsent != NULL) {
1841  LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n"));
1842  }
1843  if (pcb->unacked != NULL) {
1844  LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n"));
1845  }
1846 #if TCP_QUEUE_OOSEQ
1847  if (pcb->ooseq != NULL) {
1848  LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n"));
1849  }
1850  tcp_segs_free(pcb->ooseq);
1851  pcb->ooseq = NULL;
1852 #endif /* TCP_QUEUE_OOSEQ */
1853 
1854  /* Stop the retransmission timer as it will expect data on unacked
1855  queue if it fires */
1856  pcb->rtime = -1;
1857 
1858  tcp_segs_free(pcb->unsent);
1859  tcp_segs_free(pcb->unacked);
1860  pcb->unacked = pcb->unsent = NULL;
1861 #if TCP_OVERSIZE
1862  pcb->unsent_oversize = 0;
1863 #endif /* TCP_OVERSIZE */
1864  }
1865 }
1866 
1873 void
1874 tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb)
1875 {
1876  TCP_RMV(pcblist, pcb);
1877 
1878  tcp_pcb_purge(pcb);
1879 
 1880  /* if there is an outstanding delayed ACK, send it now */
1881  if (pcb->state != TIME_WAIT &&
1882  pcb->state != LISTEN &&
1883  pcb->flags & TF_ACK_DELAY) {
1884  pcb->flags |= TF_ACK_NOW;
1885  tcp_output(pcb);
1886  }
1887 
1888  if (pcb->state != LISTEN) {
1889  LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL);
1890  LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL);
1891 #if TCP_QUEUE_OOSEQ
1892  LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL);
1893 #endif /* TCP_QUEUE_OOSEQ */
1894  }
1895 
1896  pcb->state = CLOSED;
1897  /* reset the local port to prevent the pcb from being 'bound' */
1898  pcb->local_port = 0;
1899 
1900  LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane());
1901 }
1902 
1908 u32_t
1909 tcp_next_iss(struct tcp_pcb *pcb)
1910 {
1911 #ifdef LWIP_HOOK_TCP_ISN
1912  return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port);
1913 #else /* LWIP_HOOK_TCP_ISN */
1914  static u32_t iss = 6510;
1915 
1916  LWIP_UNUSED_ARG(pcb);
1917 
1918  iss += tcp_ticks; /* XXX */
1919  return iss;
1920 #endif /* LWIP_HOOK_TCP_ISN */
1921 }
1922 
1923 #if TCP_CALCULATE_EFF_SEND_MSS
1924 
1929 u16_t
1930 tcp_eff_send_mss_impl(u16_t sendmss, const ip_addr_t *dest
1931 #if LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING
1932  , const ip_addr_t *src
1933 #endif /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */
1934  )
1935 {
1936  u16_t mss_s;
1937  struct netif *outif;
1938  s16_t mtu;
1939 
1940  outif = ip_route(src, dest);
1941 #if LWIP_IPV6
1942 #if LWIP_IPV4
1943  if (IP_IS_V6(dest))
1944 #endif /* LWIP_IPV4 */
1945  {
1946  /* First look in destination cache, to see if there is a Path MTU. */
1947  mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif);
1948  }
1949 #if LWIP_IPV4
1950  else
1951 #endif /* LWIP_IPV4 */
1952 #endif /* LWIP_IPV6 */
1953 #if LWIP_IPV4
1954  {
1955  if (outif == NULL) {
1956  return sendmss;
1957  }
1958  mtu = outif->mtu;
1959  }
1960 #endif /* LWIP_IPV4 */
1961 
1962  if (mtu != 0) {
1963 #if LWIP_IPV6
1964 #if LWIP_IPV4
1965  if (IP_IS_V6(dest))
1966 #endif /* LWIP_IPV4 */
1967  {
1968  mss_s = mtu - IP6_HLEN - TCP_HLEN;
1969  }
1970 #if LWIP_IPV4
1971  else
1972 #endif /* LWIP_IPV4 */
1973 #endif /* LWIP_IPV6 */
1974 #if LWIP_IPV4
1975  {
1976  mss_s = mtu - IP_HLEN - TCP_HLEN;
1977  }
1978 #endif /* LWIP_IPV4 */
1979  /* RFC 1122, chap 4.2.2.6:
1980  * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize
1981  * We correct for TCP options in tcp_write(), and don't support IP options.
1982  */
1983  sendmss = LWIP_MIN(sendmss, mss_s);
1984  }
1985  return sendmss;
1986 }
1987 #endif /* TCP_CALCULATE_EFF_SEND_MSS */
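/* Worked example: for an IPv4 destination reached over a netif with an MTU of
   1500, mss_s = 1500 - IP_HLEN (20) - TCP_HLEN (20) = 1460, so a configured
   send MSS of 1460 or larger is clamped to 1460 while a smaller one is kept. */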
1988 
1990 static void
1991 tcp_netif_ip_addr_changed_pcblist(const ip_addr_t* old_addr, struct tcp_pcb* pcb_list)
1992 {
1993  struct tcp_pcb *pcb;
1994  pcb = pcb_list;
1995  while (pcb != NULL) {
1996  /* PCB bound to current local interface address? */
1997  if (ip_addr_cmp(&pcb->local_ip, old_addr)
1998 #if LWIP_AUTOIP
1999  /* connections to link-local addresses must persist (RFC3927 ch. 1.9) */
2000  && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip)))
2001 #endif /* LWIP_AUTOIP */
2002  ) {
2003  /* this connection must be aborted */
2004  struct tcp_pcb *next = pcb->next;
2005  LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb));
2006  tcp_abort(pcb);
2007  pcb = next;
2008  } else {
2009  pcb = pcb->next;
2010  }
2011  }
2012 }
2013 
2019 void
2020 tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr)
2021 {
2022  struct tcp_pcb_listen *lpcb, *next;
2023 
2024  if (!ip_addr_isany(old_addr)) {
2025  tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs);
2026  tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs);
2027 
2028  if (!ip_addr_isany(new_addr)) {
2029  /* PCB bound to current local interface address? */
2030  for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = next) {
2031  next = lpcb->next;
2032  /* PCB bound to current local interface address? */
2033  if (ip_addr_cmp(&lpcb->local_ip, old_addr)) {
2034  /* The PCB is listening to the old ipaddr and
2035  * is set to listen to the new one instead */
2036  ip_addr_copy(lpcb->local_ip, *new_addr);
2037  }
2038  }
2039  }
2040  }
2041 }
2042 
2043 const char*
2044 tcp_debug_state_str(enum tcp_state s)
2045 {
2046  return tcp_state_str[s];
2047 }
2048 
2049 #if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG
2050 
2055 void
2056 tcp_debug_print(struct tcp_hdr *tcphdr)
2057 {
2058  LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n"));
2059  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2060  LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n",
2061  lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest)));
2062  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2063  LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n",
2064  lwip_ntohl(tcphdr->seqno)));
2065  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2066  LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n",
2067  lwip_ntohl(tcphdr->ackno)));
2068  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2069  LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (",
2070  TCPH_HDRLEN(tcphdr),
2071  (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1),
2072  (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1),
2073  (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1),
2074  (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1),
2075  (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1),
2076  (u16_t)(TCPH_FLAGS(tcphdr) & 1),
2077  lwip_ntohs(tcphdr->wnd)));
2078  tcp_debug_print_flags(TCPH_FLAGS(tcphdr));
2079  LWIP_DEBUGF(TCP_DEBUG, ("), win)\n"));
2080  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2081  LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n",
2082  lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp)));
2083  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
2084 }
2085 
2091 void
2092 tcp_debug_print_state(enum tcp_state s)
2093 {
2094  LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s]));
2095 }
2096 
2102 void
2103 tcp_debug_print_flags(u8_t flags)
2104 {
2105  if (flags & TCP_FIN) {
2106  LWIP_DEBUGF(TCP_DEBUG, ("FIN "));
2107  }
2108  if (flags & TCP_SYN) {
2109  LWIP_DEBUGF(TCP_DEBUG, ("SYN "));
2110  }
2111  if (flags & TCP_RST) {
2112  LWIP_DEBUGF(TCP_DEBUG, ("RST "));
2113  }
2114  if (flags & TCP_PSH) {
2115  LWIP_DEBUGF(TCP_DEBUG, ("PSH "));
2116  }
2117  if (flags & TCP_ACK) {
2118  LWIP_DEBUGF(TCP_DEBUG, ("ACK "));
2119  }
2120  if (flags & TCP_URG) {
2121  LWIP_DEBUGF(TCP_DEBUG, ("URG "));
2122  }
2123  if (flags & TCP_ECE) {
2124  LWIP_DEBUGF(TCP_DEBUG, ("ECE "));
2125  }
2126  if (flags & TCP_CWR) {
2127  LWIP_DEBUGF(TCP_DEBUG, ("CWR "));
2128  }
2129  LWIP_DEBUGF(TCP_DEBUG, ("\n"));
2130 }
2131 
2135 void
2136 tcp_debug_print_pcbs(void)
2137 {
2138  struct tcp_pcb *pcb;
2139  struct tcp_pcb_listen *pcbl;
2140 
2141  LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n"));
2142  for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
2143  LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
2144  pcb->local_port, pcb->remote_port,
2145  pcb->snd_nxt, pcb->rcv_nxt));
2146  tcp_debug_print_state(pcb->state);
2147  }
2148 
2149  LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n"));
2150  for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) {
2151  LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port));
2152  tcp_debug_print_state(pcbl->state);
2153  }
2154 
2155  LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n"));
2156  for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
2157  LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
2158  pcb->local_port, pcb->remote_port,
2159  pcb->snd_nxt, pcb->rcv_nxt));
2160  tcp_debug_print_state(pcb->state);
2161  }
2162 }
2163 
2167 s16_t
2168 tcp_pcbs_sane(void)
2169 {
2170  struct tcp_pcb *pcb;
2171  for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
2172  LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED);
2173  LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN);
2174  LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT);
2175  }
2176  for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
2177  LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
2178  }
2179  return 1;
2180 }
 2181 #endif /* TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG */
2182 
2183 #endif /* LWIP_TCP */