The Pedigree Project  0.1
tcp_out.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2008-2014, Pedigree Developers
3  *
4  * Please see the CONTRIB file in the root of the source tree for a full
5  * list of contributors.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
28 /*
29  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without modification,
33  * are permitted provided that the following conditions are met:
34  *
35  * 1. Redistributions of source code must retain the above copyright notice,
36  * this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright notice,
38  * this list of conditions and the following disclaimer in the documentation
39  * and/or other materials provided with the distribution.
40  * 3. The name of the author may not be used to endorse or promote products
41  * derived from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
44  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
46  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
47  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
48  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
50  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
51  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
52  * OF SUCH DAMAGE.
53  *
54  * This file is part of the lwIP TCP/IP stack.
55  *
56  * Author: Adam Dunkels <adam@sics.se>
57  *
58  */
59 
60 #include "lwip/opt.h"
61 
62 #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
63 
64 #include "lwip/priv/tcp_priv.h"
65 #include "lwip/def.h"
66 #include "lwip/mem.h"
67 #include "lwip/memp.h"
68 #include "lwip/ip_addr.h"
69 #include "lwip/netif.h"
70 #include "lwip/inet_chksum.h"
71 #include "lwip/stats.h"
72 #include "lwip/ip6.h"
73 #include "lwip/ip6_addr.h"
74 #if LWIP_TCP_TIMESTAMPS
75 #include "lwip/sys.h"
76 #endif
77 
78 #include <string.h>
79 
80 /* Define some copy-macros for checksum-on-copy so that the code looks
81  nicer by preventing too many ifdef's. */
82 #if TCP_CHECKSUM_ON_COPY
83 #define TCP_DATA_COPY(dst, src, len, seg) do { \
84  tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \
85  len, &seg->chksum, &seg->chksum_swapped); \
86  seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0)
87 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \
88  tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped);
89 #else /* TCP_CHECKSUM_ON_COPY*/
90 #define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len)
91 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len)
92 #endif /* TCP_CHECKSUM_ON_COPY*/
93 
96 #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK
97 #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0
98 #endif
99 /* Allow to override the failure of sanity check from warning to e.g. hard failure */
100 #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
101 #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL
102 #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg)
103 #endif
104 #endif
105 
106 #if TCP_OVERSIZE
107 
108 #ifndef TCP_OVERSIZE_CALC_LENGTH
109 #define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE)
110 #endif
111 #endif
112 
113 /* Forward declarations.*/
114 static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif);
115 
126 static struct pbuf *
127 tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen,
128  u32_t seqno_be /* already in network byte order */)
129 {
130  struct tcp_hdr *tcphdr;
131  struct pbuf *p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM);
132  if (p != NULL) {
133  LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
134  (p->len >= TCP_HLEN + optlen));
135  tcphdr = (struct tcp_hdr *)p->payload;
136  tcphdr->src = lwip_htons(pcb->local_port);
137  tcphdr->dest = lwip_htons(pcb->remote_port);
138  tcphdr->seqno = seqno_be;
139  tcphdr->ackno = lwip_htonl(pcb->rcv_nxt);
140  TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), TCP_ACK);
141  tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
142  tcphdr->chksum = 0;
143  tcphdr->urgp = 0;
144 
145  /* If we're sending a packet, update the announced right window edge */
146  pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
147  }
148  return p;
149 }
150 
157 err_t
158 tcp_send_fin(struct tcp_pcb *pcb)
159 {
160  /* first, try to add the fin to the last unsent segment */
161  if (pcb->unsent != NULL) {
162  struct tcp_seg *last_unsent;
163  for (last_unsent = pcb->unsent; last_unsent->next != NULL;
164  last_unsent = last_unsent->next);
165 
166  if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) {
167  /* no SYN/FIN/RST flag in the header, we can add the FIN flag */
168  TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN);
169  pcb->flags |= TF_FIN;
170  return ERR_OK;
171  }
172  }
173  /* no data, no length, flags, copy=1, no optdata */
174  return tcp_enqueue_flags(pcb, TCP_FIN);
175 }
176 
191 static struct tcp_seg *
192 tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags)
193 {
194  struct tcp_seg *seg;
195  u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags);
196 
197  if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
198  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n"));
199  pbuf_free(p);
200  return NULL;
201  }
202  seg->flags = optflags;
203  seg->next = NULL;
204  seg->p = p;
205  LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen);
206  seg->len = p->tot_len - optlen;
207 #if TCP_OVERSIZE_DBGCHECK
208  seg->oversize_left = 0;
209 #endif /* TCP_OVERSIZE_DBGCHECK */
210 #if TCP_CHECKSUM_ON_COPY
211  seg->chksum = 0;
212  seg->chksum_swapped = 0;
213  /* check optflags */
214  LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
215  (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
216 #endif /* TCP_CHECKSUM_ON_COPY */
217 
218  /* build TCP header */
219  if (pbuf_header(p, TCP_HLEN)) {
220  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
221  TCP_STATS_INC(tcp.err);
222  tcp_seg_free(seg);
223  return NULL;
224  }
225  seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
226  seg->tcphdr->src = lwip_htons(pcb->local_port);
227  seg->tcphdr->dest = lwip_htons(pcb->remote_port);
228  seg->tcphdr->seqno = lwip_htonl(seqno);
229  /* ackno is set in tcp_output */
230  TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags);
231  /* wnd and chksum are set in tcp_output */
232  seg->tcphdr->urgp = 0;
233  return seg;
234 }
235 
#if TCP_OVERSIZE
/**
 * Allocate a PBUF_RAM pbuf of at least 'length' bytes, possibly oversized
 * up to 'max_length' so that a later tcp_write() can append data in place.
 *
 * The pbuf's len/tot_len are trimmed to 'length'; the spare room is
 * reported through *oversize.
 *
 * Heuristic: allocate the extra room when the caller hinted at more data
 * (TCP_WRITE_FLAG_MORE) or when the Nagle algorithm will hold this
 * segment back anyway (TF_NODELAY clear, and either not the first new
 * segment or the unsent/unacked queues are non-empty).
 *
 * @return the pbuf (always a single, unchained pbuf), or NULL on failure
 */
static struct pbuf *
tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
                  u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags,
                  u8_t first_seg)
{
  struct pbuf *pb;
  u16_t alloc_len = length;

#if LWIP_NETIF_TX_SINGLE_PBUF
  LWIP_UNUSED_ARG(max_length);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(apiflags);
  LWIP_UNUSED_ARG(first_seg);
  /* Single-pbuf TX: always allocate the full segment up front. */
  alloc_len = max_length;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
  if (length < max_length) {
    /* Oversize only when the segment is likely to sit on the queue long
     * enough for another tcp_write() to fill the spare room. */
    if ((apiflags & TCP_WRITE_FLAG_MORE) ||
        (!(pcb->flags & TF_NODELAY) &&
         (!first_seg ||
          pcb->unsent != NULL ||
          pcb->unacked != NULL))) {
      alloc_len = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length)));
    }
  }
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */

  pb = pbuf_alloc(layer, alloc_len, PBUF_RAM);
  if (pb == NULL) {
    return NULL;
  }
  LWIP_ASSERT("need unchained pbuf", pb->next == NULL);

  *oversize = pb->len - length;
  /* Trim len/tot_len to the currently used size. */
  pb->len = pb->tot_len = length;
  return pb;
}
#else /* TCP_OVERSIZE */
#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM)
#endif /* TCP_OVERSIZE */
300 
#if TCP_CHECKSUM_ON_COPY
/**
 * Fold a partial checksum for 'len' newly added bytes into a segment's
 * running checksum state.
 *
 * An odd data length shifts the byte alignment of everything that
 * follows, so the swapped flag is toggled and the running checksum's
 * bytes are swapped to compensate.
 */
static void
tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum,
                   u8_t *seg_chksum_swapped)
{
  /* Add the new partial sum to the stored one and fold back to 16 bits. */
  u32_t acc = (u32_t)chksum + (u32_t)*seg_chksum;
  u16_t folded = FOLD_U32T(acc);

  if ((len & 1) != 0) {
    *seg_chksum_swapped = 1 - *seg_chksum_swapped;
    folded = SWAP_BYTES_IN_WORD(folded);
  }
  *seg_chksum = folded;
}
#endif /* TCP_CHECKSUM_ON_COPY */
318 
325 static err_t
326 tcp_write_checks(struct tcp_pcb *pcb, u16_t len)
327 {
328  /* connection is in invalid state for data transmission? */
329  if ((pcb->state != ESTABLISHED) &&
330  (pcb->state != CLOSE_WAIT) &&
331  (pcb->state != SYN_SENT) &&
332  (pcb->state != SYN_RCVD)) {
333  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
334  return ERR_CONN;
335  } else if (len == 0) {
336  return ERR_OK;
337  }
338 
339  /* fail on too much data */
340  if (len > pcb->snd_buf) {
341  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n",
342  len, pcb->snd_buf));
343  pcb->flags |= TF_NAGLEMEMERR;
344  return ERR_MEM;
345  }
346 
347  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen));
348 
349  /* If total number of pbufs on the unsent/unacked queues exceeds the
350  * configured maximum, return an error */
351  /* check for configured max queuelen and possible overflow */
352  if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
353  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n",
354  pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN));
355  TCP_STATS_INC(tcp.memerr);
356  pcb->flags |= TF_NAGLEMEMERR;
357  return ERR_MEM;
358  }
359  if (pcb->snd_queuelen != 0) {
360  LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty",
361  pcb->unacked != NULL || pcb->unsent != NULL);
362  } else {
363  LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
364  pcb->unacked == NULL && pcb->unsent == NULL);
365  }
366  return ERR_OK;
367 }
368 
386 err_t
387 tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags)
388 {
389  struct pbuf *concat_p = NULL;
390  struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL;
391  u16_t pos = 0; /* position in 'arg' data */
392  u16_t queuelen;
393  u8_t optlen = 0;
394  u8_t optflags = 0;
395 #if TCP_OVERSIZE
396  u16_t oversize = 0;
397  u16_t oversize_used = 0;
398 #if TCP_OVERSIZE_DBGCHECK
399  u16_t oversize_add = 0;
400 #endif /* TCP_OVERSIZE_DBGCHECK*/
401 #endif /* TCP_OVERSIZE */
402  u16_t extendlen = 0;
403 #if TCP_CHECKSUM_ON_COPY
404  u16_t concat_chksum = 0;
405  u8_t concat_chksum_swapped = 0;
406  u16_t concat_chksummed = 0;
407 #endif /* TCP_CHECKSUM_ON_COPY */
408  err_t err;
409  /* don't allocate segments bigger than half the maximum window we ever received */
410  u16_t mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max/2));
411  mss_local = mss_local ? mss_local : pcb->mss;
412 
413 #if LWIP_NETIF_TX_SINGLE_PBUF
414  /* Always copy to try to create single pbufs for TX */
415  apiflags |= TCP_WRITE_FLAG_COPY;
416 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
417 
418  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
419  (void *)pcb, arg, len, (u16_t)apiflags));
420  LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)",
421  arg != NULL, return ERR_ARG;);
422 
423  err = tcp_write_checks(pcb, len);
424  if (err != ERR_OK) {
425  return err;
426  }
427  queuelen = pcb->snd_queuelen;
428 
429 #if LWIP_TCP_TIMESTAMPS
430  if ((pcb->flags & TF_TIMESTAMP)) {
431  /* Make sure the timestamp option is only included in data segments if we
432  agreed about it with the remote host. */
433  optflags = TF_SEG_OPTS_TS;
434  optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
435  /* ensure that segments can hold at least one data byte... */
436  mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1);
437  }
438 #endif /* LWIP_TCP_TIMESTAMPS */
439 
440 
441  /*
442  * TCP segmentation is done in three phases with increasing complexity:
443  *
444  * 1. Copy data directly into an oversized pbuf.
445  * 2. Chain a new pbuf to the end of pcb->unsent.
446  * 3. Create new segments.
447  *
448  * We may run out of memory at any point. In that case we must
449  * return ERR_MEM and not change anything in pcb. Therefore, all
450  * changes are recorded in local variables and committed at the end
451  * of the function. Some pcb fields are maintained in local copies:
452  *
453  * queuelen = pcb->snd_queuelen
454  * oversize = pcb->unsent_oversize
455  *
456  * These variables are set consistently by the phases:
457  *
458  * seg points to the last segment tampered with.
459  *
460  * pos records progress as data is segmented.
461  */
462 
463  /* Find the tail of the unsent queue. */
464  if (pcb->unsent != NULL) {
465  u16_t space;
466  u16_t unsent_optlen;
467 
468  /* @todo: this could be sped up by keeping last_unsent in the pcb */
469  for (last_unsent = pcb->unsent; last_unsent->next != NULL;
470  last_unsent = last_unsent->next);
471 
472  /* Usable space at the end of the last unsent segment */
473  unsent_optlen = LWIP_TCP_OPT_LENGTH(last_unsent->flags);
474  LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen);
475  space = mss_local - (last_unsent->len + unsent_optlen);
476 
477  /*
478  * Phase 1: Copy data directly into an oversized pbuf.
479  *
480  * The number of bytes copied is recorded in the oversize_used
481  * variable. The actual copying is done at the bottom of the
482  * function.
483  */
484 #if TCP_OVERSIZE
485 #if TCP_OVERSIZE_DBGCHECK
486  /* check that pcb->unsent_oversize matches last_unsent->oversize_left */
487  LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
488  pcb->unsent_oversize == last_unsent->oversize_left);
489 #endif /* TCP_OVERSIZE_DBGCHECK */
490  oversize = pcb->unsent_oversize;
491  if (oversize > 0) {
492  LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space);
493  seg = last_unsent;
494  oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len));
495  pos += oversize_used;
496  oversize -= oversize_used;
497  space -= oversize_used;
498  }
499  /* now we are either finished or oversize is zero */
500  LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len));
501 #endif /* TCP_OVERSIZE */
502 
503  /*
504  * Phase 2: Chain a new pbuf to the end of pcb->unsent.
505  *
506  * As an exception when NOT copying the data, if the given data buffer
507  * directly follows the last unsent data buffer in memory, extend the last
508  * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation.
509  *
510  * We don't extend segments containing SYN/FIN flags or options
511  * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
512  * the end.
513  */
514  if ((pos < len) && (space > 0) && (last_unsent->len > 0)) {
515  u16_t seglen = LWIP_MIN(space, len - pos);
516  seg = last_unsent;
517 
518  /* Create a pbuf with a copy or reference to seglen bytes. We
519  * can use PBUF_RAW here since the data appears in the middle of
520  * a segment. A header will never be prepended. */
521  if (apiflags & TCP_WRITE_FLAG_COPY) {
522  /* Data is copied */
523  if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
525  ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
526  seglen));
527  goto memerr;
528  }
529 #if TCP_OVERSIZE_DBGCHECK
530  oversize_add = oversize;
531 #endif /* TCP_OVERSIZE_DBGCHECK */
532  TCP_DATA_COPY2(concat_p->payload, (const u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
533 #if TCP_CHECKSUM_ON_COPY
534  concat_chksummed += seglen;
535 #endif /* TCP_CHECKSUM_ON_COPY */
536  queuelen += pbuf_clen(concat_p);
537  } else {
538  /* Data is not copied */
539  /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */
540  struct pbuf *p;
541  for (p = last_unsent->p; p->next != NULL; p = p->next);
542  if (p->type == PBUF_ROM && (const u8_t *)p->payload + p->len == (const u8_t *)arg) {
543  LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0);
544  extendlen = seglen;
545  } else {
546  if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) {
548  ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
549  goto memerr;
550  }
551  /* reference the non-volatile payload data */
552  ((struct pbuf_rom*)concat_p)->payload = (const u8_t*)arg + pos;
553  queuelen += pbuf_clen(concat_p);
554  }
555 #if TCP_CHECKSUM_ON_COPY
556  /* calculate the checksum of nocopy-data */
557  tcp_seg_add_chksum(~inet_chksum((const u8_t*)arg + pos, seglen), seglen,
558  &concat_chksum, &concat_chksum_swapped);
559  concat_chksummed += seglen;
560 #endif /* TCP_CHECKSUM_ON_COPY */
561  }
562 
563  pos += seglen;
564  }
565  } else {
566 #if TCP_OVERSIZE
567  LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
568  pcb->unsent_oversize == 0);
569 #endif /* TCP_OVERSIZE */
570  }
571 
572  /*
573  * Phase 3: Create new segments.
574  *
575  * The new segments are chained together in the local 'queue'
576  * variable, ready to be appended to pcb->unsent.
577  */
578  while (pos < len) {
579  struct pbuf *p;
580  u16_t left = len - pos;
581  u16_t max_len = mss_local - optlen;
582  u16_t seglen = LWIP_MIN(left, max_len);
583 #if TCP_CHECKSUM_ON_COPY
584  u16_t chksum = 0;
585  u8_t chksum_swapped = 0;
586 #endif /* TCP_CHECKSUM_ON_COPY */
587 
588  if (apiflags & TCP_WRITE_FLAG_COPY) {
589  /* If copy is set, memory should be allocated and data copied
590  * into pbuf */
591  if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
592  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
593  goto memerr;
594  }
595  LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
596  (p->len >= seglen));
597  TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t*)arg + pos, seglen, &chksum, &chksum_swapped);
598  } else {
599  /* Copy is not set: First allocate a pbuf for holding the data.
600  * Since the referenced data is available at least until it is
601  * sent out on the link (as it has to be ACKed by the remote
602  * party) we can safely use PBUF_ROM instead of PBUF_REF here.
603  */
604  struct pbuf *p2;
605 #if TCP_OVERSIZE
606  LWIP_ASSERT("oversize == 0", oversize == 0);
607 #endif /* TCP_OVERSIZE */
608  if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) {
609  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
610  goto memerr;
611  }
612 #if TCP_CHECKSUM_ON_COPY
613  /* calculate the checksum of nocopy-data */
614  chksum = ~inet_chksum((const u8_t*)arg + pos, seglen);
615  if (seglen & 1) {
616  chksum_swapped = 1;
617  chksum = SWAP_BYTES_IN_WORD(chksum);
618  }
619 #endif /* TCP_CHECKSUM_ON_COPY */
620  /* reference the non-volatile payload data */
621  ((struct pbuf_rom*)p2)->payload = (const u8_t*)arg + pos;
622 
623  /* Second, allocate a pbuf for the headers. */
624  if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
625  /* If allocation fails, we have to deallocate the data pbuf as
626  * well. */
627  pbuf_free(p2);
628  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n"));
629  goto memerr;
630  }
631  /* Concatenate the headers and data pbufs together. */
632  pbuf_cat(p/*header*/, p2/*data*/);
633  }
634 
635  queuelen += pbuf_clen(p);
636 
637  /* Now that there are more segments queued, we check again if the
638  * length of the queue exceeds the configured maximum or
639  * overflows. */
640  if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
641  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n",
642  queuelen, (int)TCP_SND_QUEUELEN));
643  pbuf_free(p);
644  goto memerr;
645  }
646 
647  if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
648  goto memerr;
649  }
650 #if TCP_OVERSIZE_DBGCHECK
651  seg->oversize_left = oversize;
652 #endif /* TCP_OVERSIZE_DBGCHECK */
653 #if TCP_CHECKSUM_ON_COPY
654  seg->chksum = chksum;
655  seg->chksum_swapped = chksum_swapped;
656  seg->flags |= TF_SEG_DATA_CHECKSUMMED;
657 #endif /* TCP_CHECKSUM_ON_COPY */
658 
659  /* first segment of to-be-queued data? */
660  if (queue == NULL) {
661  queue = seg;
662  } else {
663  /* Attach the segment to the end of the queued segments */
664  LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
665  prev_seg->next = seg;
666  }
667  /* remember last segment of to-be-queued data for next iteration */
668  prev_seg = seg;
669 
670  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
671  lwip_ntohl(seg->tcphdr->seqno),
672  lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));
673 
674  pos += seglen;
675  }
676 
677  /*
678  * All three segmentation phases were successful. We can commit the
679  * transaction.
680  */
681 #if TCP_OVERSIZE_DBGCHECK
682  if ((last_unsent != NULL) && (oversize_add != 0)) {
683  last_unsent->oversize_left += oversize_add;
684  }
685 #endif /* TCP_OVERSIZE_DBGCHECK */
686 
687  /*
688  * Phase 1: If data has been added to the preallocated tail of
689  * last_unsent, we update the length fields of the pbuf chain.
690  */
691 #if TCP_OVERSIZE
692  if (oversize_used > 0) {
693  struct pbuf *p;
694  /* Bump tot_len of whole chain, len of tail */
695  for (p = last_unsent->p; p; p = p->next) {
696  p->tot_len += oversize_used;
697  if (p->next == NULL) {
698  TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent);
699  p->len += oversize_used;
700  }
701  }
702  last_unsent->len += oversize_used;
703 #if TCP_OVERSIZE_DBGCHECK
704  LWIP_ASSERT("last_unsent->oversize_left >= oversize_used",
705  last_unsent->oversize_left >= oversize_used);
706  last_unsent->oversize_left -= oversize_used;
707 #endif /* TCP_OVERSIZE_DBGCHECK */
708  }
709  pcb->unsent_oversize = oversize;
710 #endif /* TCP_OVERSIZE */
711 
712  /*
713  * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we
714  * determined that the last ROM pbuf can be extended to include the new data.
715  */
716  if (concat_p != NULL) {
717  LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
718  (last_unsent != NULL));
719  pbuf_cat(last_unsent->p, concat_p);
720  last_unsent->len += concat_p->tot_len;
721  } else if (extendlen > 0) {
722  struct pbuf *p;
723  LWIP_ASSERT("tcp_write: extension of reference requires reference",
724  last_unsent != NULL && last_unsent->p != NULL);
725  for (p = last_unsent->p; p->next != NULL; p = p->next) {
726  p->tot_len += extendlen;
727  }
728  p->tot_len += extendlen;
729  p->len += extendlen;
730  last_unsent->len += extendlen;
731  }
732 
733 #if TCP_CHECKSUM_ON_COPY
734  if (concat_chksummed) {
735  LWIP_ASSERT("tcp_write: concat checksum needs concatenated data",
736  concat_p != NULL || extendlen > 0);
737  /*if concat checksumm swapped - swap it back */
738  if (concat_chksum_swapped) {
739  concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum);
740  }
741  tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum,
742  &last_unsent->chksum_swapped);
743  last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
744  }
745 #endif /* TCP_CHECKSUM_ON_COPY */
746 
747  /*
748  * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
749  * is harmless
750  */
751  if (last_unsent == NULL) {
752  pcb->unsent = queue;
753  } else {
754  last_unsent->next = queue;
755  }
756 
757  /*
758  * Finally update the pcb state.
759  */
760  pcb->snd_lbb += len;
761  pcb->snd_buf -= len;
762  pcb->snd_queuelen = queuelen;
763 
764  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n",
765  pcb->snd_queuelen));
766  if (pcb->snd_queuelen != 0) {
767  LWIP_ASSERT("tcp_write: valid queue length",
768  pcb->unacked != NULL || pcb->unsent != NULL);
769  }
770 
771  /* Set the PSH flag in the last segment that we enqueued. */
772  if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE)==0)) {
773  TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
774  }
775 
776  return ERR_OK;
777 memerr:
778  pcb->flags |= TF_NAGLEMEMERR;
779  TCP_STATS_INC(tcp.memerr);
780 
781  if (concat_p != NULL) {
782  pbuf_free(concat_p);
783  }
784  if (queue != NULL) {
785  tcp_segs_free(queue);
786  }
787  if (pcb->snd_queuelen != 0) {
788  LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
789  pcb->unsent != NULL);
790  }
791  LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen));
792  return ERR_MEM;
793 }
794 
803 err_t
804 tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
805 {
806  struct pbuf *p;
807  struct tcp_seg *seg;
808  u8_t optflags = 0;
809  u8_t optlen = 0;
810 
811  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
812 
813  LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
814  (flags & (TCP_SYN | TCP_FIN)) != 0);
815 
816  /* check for configured max queuelen and possible overflow (FIN flag should always come through!) */
817  if (((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) &&
818  ((flags & TCP_FIN) == 0)) {
819  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n",
820  pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN));
821  TCP_STATS_INC(tcp.memerr);
822  pcb->flags |= TF_NAGLEMEMERR;
823  return ERR_MEM;
824  }
825 
826  if (flags & TCP_SYN) {
827  optflags = TF_SEG_OPTS_MSS;
828 #if LWIP_WND_SCALE
829  if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) {
830  /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only
831  be sent if we received a window scale option from the remote host. */
832  optflags |= TF_SEG_OPTS_WND_SCALE;
833  }
834 #endif /* LWIP_WND_SCALE */
835  }
836 #if LWIP_TCP_TIMESTAMPS
837  if ((pcb->flags & TF_TIMESTAMP)) {
838  /* Make sure the timestamp option is only included in data segments if we
839  agreed about it with the remote host. */
840  optflags |= TF_SEG_OPTS_TS;
841  }
842 #endif /* LWIP_TCP_TIMESTAMPS */
843  optlen = LWIP_TCP_OPT_LENGTH(optflags);
844 
845  /* Allocate pbuf with room for TCP header + options */
846  if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
847  pcb->flags |= TF_NAGLEMEMERR;
848  TCP_STATS_INC(tcp.memerr);
849  return ERR_MEM;
850  }
851  LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
852  (p->len >= optlen));
853 
854  /* Allocate memory for tcp_seg, and fill in fields. */
855  if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
856  pcb->flags |= TF_NAGLEMEMERR;
857  TCP_STATS_INC(tcp.memerr);
858  return ERR_MEM;
859  }
860  LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0);
861  LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);
862 
864  ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
865  lwip_ntohl(seg->tcphdr->seqno),
866  lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
867  (u16_t)flags));
868 
869  /* Now append seg to pcb->unsent queue */
870  if (pcb->unsent == NULL) {
871  pcb->unsent = seg;
872  } else {
873  struct tcp_seg *useg;
874  for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
875  useg->next = seg;
876  }
877 #if TCP_OVERSIZE
878  /* The new unsent tail has no space */
879  pcb->unsent_oversize = 0;
880 #endif /* TCP_OVERSIZE */
881 
882  /* SYN and FIN bump the sequence number */
883  if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
884  pcb->snd_lbb++;
885  /* optlen does not influence snd_buf */
886  }
887  if (flags & TCP_FIN) {
888  pcb->flags |= TF_FIN;
889  }
890 
891  /* update number of segments on the queues */
892  pcb->snd_queuelen += pbuf_clen(seg->p);
893  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen));
894  if (pcb->snd_queuelen != 0) {
895  LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
896  pcb->unacked != NULL || pcb->unsent != NULL);
897  }
898 
899  return ERR_OK;
900 }
901 
#if LWIP_TCP_TIMESTAMPS
/**
 * Build a 12-byte TCP timestamp option at 'opts'.
 *
 * @param pcb connection whose timestamp state is used
 * @param opts word-aligned option pointer to write the option into
 */
static void
tcp_build_timestamp_option(struct tcp_pcb *pcb, u32_t *opts)
{
  /* Two NOP bytes (0x01 0x01) pad the option to a word boundary,
   * followed by kind 8 (timestamp) and length 10. */
  opts[0] = PP_HTONL(0x0101080A);
  /* TSval: our current time; TSecr: latest timestamp seen from the peer. */
  opts[1] = lwip_htonl(sys_now());
  opts[2] = lwip_htonl(pcb->ts_recent);
}
#endif
917 
#if LWIP_WND_SCALE
/**
 * Build a TCP window scale option at 'opts'.
 */
static void
tcp_build_wnd_scale_option(u32_t *opts)
{
  /* One NOP byte (0x01) pads the 3-byte option to a word boundary,
   * followed by kind 3 (window scale), length 3 and our shift count. */
  opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE);
}
#endif
930 
936 err_t
937 tcp_send_empty_ack(struct tcp_pcb *pcb)
938 {
939  err_t err;
940  struct pbuf *p;
941  u8_t optlen = 0;
942  struct netif *netif;
943 #if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP
944  struct tcp_hdr *tcphdr;
945 #endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */
946 
947 #if LWIP_TCP_TIMESTAMPS
948  if (pcb->flags & TF_TIMESTAMP) {
949  optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
950  }
951 #endif
952 
953  p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt));
954  if (p == NULL) {
955  /* let tcp_fasttmr retry sending this ACK */
956  pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW);
957  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n"));
958  return ERR_BUF;
959  }
960 #if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP
961  tcphdr = (struct tcp_hdr *)p->payload;
962 #endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */
964  ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt));
965 
966  /* NB. MSS and window scale options are only sent on SYNs, so ignore them here */
967 #if LWIP_TCP_TIMESTAMPS
968  pcb->ts_lastacksent = pcb->rcv_nxt;
969 
970  if (pcb->flags & TF_TIMESTAMP) {
971  tcp_build_timestamp_option(pcb, (u32_t *)(tcphdr + 1));
972  }
973 #endif
974 
975  netif = ip_route(&pcb->local_ip, &pcb->remote_ip);
976  if (netif == NULL) {
977  err = ERR_RTE;
978  } else {
979 #if CHECKSUM_GEN_TCP
980  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
981  tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
982  &pcb->local_ip, &pcb->remote_ip);
983  }
984 #endif
985  NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint));
986  err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip,
987  pcb->ttl, pcb->tos, IP_PROTO_TCP, netif);
988  NETIF_SET_HWADDRHINT(netif, NULL);
989  }
990  pbuf_free(p);
991 
992  if (err != ERR_OK) {
993  /* let tcp_fasttmr retry sending this ACK */
994  pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW);
995  } else {
996  /* remove ACK flags from the PCB, as we sent an empty ACK now */
997  pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
998  }
999 
1000  return err;
1001 }
1002 
1011 err_t
1012 tcp_output(struct tcp_pcb *pcb)
1013 {
1014  struct tcp_seg *seg, *useg;
1015  u32_t wnd, snd_nxt;
1016  err_t err;
1017  struct netif *netif;
1018 #if TCP_CWND_DEBUG
1019  s16_t i = 0;
1020 #endif /* TCP_CWND_DEBUG */
1021 
1022  /* pcb->state LISTEN not allowed here */
1023  LWIP_ASSERT("don't call tcp_output for listen-pcbs",
1024  pcb->state != LISTEN);
1025 
1026  /* First, check if we are invoked by the TCP input processing
1027  code. If so, we do not output anything. Instead, we rely on the
1028  input processing code to call us when input processing is done
1029  with. */
1030  if (tcp_input_pcb == pcb) {
1031  return ERR_OK;
1032  }
1033 
1034  wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd);
1035 
1036  seg = pcb->unsent;
1037 
1038  /* If the TF_ACK_NOW flag is set and no data will be sent (either
1039  * because the ->unsent queue is empty or because the window does
1040  * not allow it), construct an empty ACK segment and send it.
1041  *
1042  * If data is to be sent, we will just piggyback the ACK (see below).
1043  */
1044  if (pcb->flags & TF_ACK_NOW &&
1045  (seg == NULL ||
1046  lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd)) {
1047  return tcp_send_empty_ack(pcb);
1048  }
1049 
1050  /* useg should point to last segment on unacked queue */
1051  useg = pcb->unacked;
1052  if (useg != NULL) {
1053  for (; useg->next != NULL; useg = useg->next);
1054  }
1055 
1056  netif = ip_route(&pcb->local_ip, &pcb->remote_ip);
1057  if (netif == NULL) {
1058  return ERR_RTE;
1059  }
1060 
1061  /* If we don't have a local IP address, we get one from netif */
1062  if (ip_addr_isany(&pcb->local_ip)) {
1063  const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip);
1064  if (local_ip == NULL) {
1065  return ERR_RTE;
1066  }
1067  ip_addr_copy(pcb->local_ip, *local_ip);
1068  }
1069 
1070 #if TCP_OUTPUT_DEBUG
1071  if (seg == NULL) {
1072  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n",
1073  (void*)pcb->unsent));
1074  }
1075 #endif /* TCP_OUTPUT_DEBUG */
1076 #if TCP_CWND_DEBUG
1077  if (seg == NULL) {
1078  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F
1079  ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
1080  ", seg == NULL, ack %"U32_F"\n",
1081  pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack));
1082  } else {
1084  ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
1085  ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n",
1086  pcb->snd_wnd, pcb->cwnd, wnd,
1087  lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len,
1088  lwip_ntohl(seg->tcphdr->seqno), pcb->lastack));
1089  }
1090 #endif /* TCP_CWND_DEBUG */
1091  /* Check if we need to start the persistent timer when the next unsent segment
1092  * does not fit within the remaining send window and RTO timer is not running (we
1093  * have no in-flight data). A traditional approach would fill the remaining window
1094  * with part of the unsent segment (which will engage zero-window probing upon
1095  * reception of the zero window update from the receiver). This ensures the
1096  * subsequent window update is reliably received. With the goal of being lightweight,
1097  * we avoid splitting the unsent segment and treat the window as already zero.
1098  */
1099  if (seg != NULL &&
1100  lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd &&
1101  wnd > 0 && wnd == pcb->snd_wnd && pcb->unacked == NULL) {
1102  /* Start the persist timer */
1103  if (pcb->persist_backoff == 0) {
1104  pcb->persist_cnt = 0;
1105  pcb->persist_backoff = 1;
1106  }
1107  goto output_done;
1108  }
1109  /* data available and window allows it to be sent? */
1110  while (seg != NULL &&
1111  lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) {
1112  LWIP_ASSERT("RST not expected here!",
1113  (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0);
1114  /* Stop sending if the nagle algorithm would prevent it
1115  * Don't stop:
1116  * - if tcp_write had a memory error before (prevent delayed ACK timeout) or
1117  * - if FIN was already enqueued for this PCB (SYN is always alone in a segment -
1118  * either seg->next != NULL or pcb->unacked == NULL;
1119  * RST is no sent using tcp_write/tcp_output.
1120  */
1121  if ((tcp_do_output_nagle(pcb) == 0) &&
1122  ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) {
1123  break;
1124  }
1125 #if TCP_CWND_DEBUG
1126  LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n",
1127  pcb->snd_wnd, pcb->cwnd, wnd,
1128  lwip_ntohl(seg->tcphdr->seqno) + seg->len -
1129  pcb->lastack,
1130  lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i));
1131  ++i;
1132 #endif /* TCP_CWND_DEBUG */
1133 
1134  if (pcb->state != SYN_SENT) {
1135  TCPH_SET_FLAG(seg->tcphdr, TCP_ACK);
1136  }
1137 
1138 #if TCP_OVERSIZE_DBGCHECK
1139  seg->oversize_left = 0;
1140 #endif /* TCP_OVERSIZE_DBGCHECK */
1141  err = tcp_output_segment(seg, pcb, netif);
1142  if (err != ERR_OK) {
1143  /* segment could not be sent, for whatever reason */
1144  pcb->flags |= TF_NAGLEMEMERR;
1145  return err;
1146  }
1147  pcb->unsent = seg->next;
1148  if (pcb->state != SYN_SENT) {
1149  pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
1150  }
1151  snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
1152  if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
1153  pcb->snd_nxt = snd_nxt;
1154  }
1155  /* put segment on unacknowledged list if length > 0 */
1156  if (TCP_TCPLEN(seg) > 0) {
1157  seg->next = NULL;
1158  /* unacked list is empty? */
1159  if (pcb->unacked == NULL) {
1160  pcb->unacked = seg;
1161  useg = seg;
1162  /* unacked list is not empty? */
1163  } else {
1164  /* In the case of fast retransmit, the packet should not go to the tail
1165  * of the unacked queue, but rather somewhere before it. We need to check for
1166  * this case. -STJ Jul 27, 2004 */
1167  if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) {
1168  /* add segment to before tail of unacked list, keeping the list sorted */
1169  struct tcp_seg **cur_seg = &(pcb->unacked);
1170  while (*cur_seg &&
1171  TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
1172  cur_seg = &((*cur_seg)->next );
1173  }
1174  seg->next = (*cur_seg);
1175  (*cur_seg) = seg;
1176  } else {
1177  /* add segment to tail of unacked list */
1178  useg->next = seg;
1179  useg = useg->next;
1180  }
1181  }
1182  /* do not queue empty segments on the unacked list */
1183  } else {
1184  tcp_seg_free(seg);
1185  }
1186  seg = pcb->unsent;
1187  }
1188 output_done:
1189 #if TCP_OVERSIZE
1190  if (pcb->unsent == NULL) {
1191  /* last unsent has been removed, reset unsent_oversize */
1192  pcb->unsent_oversize = 0;
1193  }
1194 #endif /* TCP_OVERSIZE */
1195 
1196  pcb->flags &= ~TF_NAGLEMEMERR;
1197  return ERR_OK;
1198 }
1199 
/**
 * Called by tcp_output() to actually send a TCP segment over IP.
 *
 * Fills in the remaining header fields (ackno, wnd), appends the requested
 * options (MSS / timestamp / window scale), arms the retransmission and RTT
 * timers, computes the checksum and hands the pbuf to ip_output_if().
 *
 * @param seg the tcp_seg to send
 * @param pcb the tcp_pcb for the TCP connection used to send the segment
 * @param netif the netif used to send the segment
 * @return ERR_OK if sent (or skipped because the pbuf is still in flight),
 *         an err_t from ip_output_if() otherwise
 */
static err_t
tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif)
{
  err_t err;
  u16_t len;
  u32_t *opts;

  if (seg->p->ref != 1) {
    /* This can happen if the pbuf of this segment is still referenced by the
       netif driver due to deferred transmission. Since this function modifies
       p->len, we must not continue in this case. */
    return ERR_OK;
  }

  /* The TCP header has already been constructed, but the ackno and
     wnd fields remain. */
  seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt);

  /* advertise our receive window size in this TCP segment */
#if LWIP_WND_SCALE
  if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
    /* The Window field in a SYN segment itself (the only type where we send
       the window scale option) is never scaled. */
    seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd));
  } else
#endif /* LWIP_WND_SCALE */
  {
    seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
  }

  /* remember the right edge advertised to the peer */
  pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;

  /* Add any requested options. NB MSS option is only set on SYN
     packets, so ignore it here */
  /* cast through void* to get rid of alignment warnings */
  opts = (u32_t *)(void *)(seg->tcphdr + 1);
  if (seg->flags & TF_SEG_OPTS_MSS) {
    u16_t mss;
#if TCP_CALCULATE_EFF_SEND_MSS
    mss = tcp_eff_send_mss(TCP_MSS, &pcb->local_ip, &pcb->remote_ip);
#else /* TCP_CALCULATE_EFF_SEND_MSS */
    mss = TCP_MSS;
#endif /* TCP_CALCULATE_EFF_SEND_MSS */
    *opts = TCP_BUILD_MSS_OPTION(mss);
    opts += 1;
  }
#if LWIP_TCP_TIMESTAMPS
  pcb->ts_lastacksent = pcb->rcv_nxt;

  if (seg->flags & TF_SEG_OPTS_TS) {
    tcp_build_timestamp_option(pcb, opts);
    opts += 3;  /* timestamp option occupies 3 * 4 bytes */
  }
#endif
#if LWIP_WND_SCALE
  if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
    tcp_build_wnd_scale_option(opts);
    opts += 1;
  }
#endif

  /* Set retransmission timer running if it is not currently enabled
     This must be set before checking the route. */
  if (pcb->rtime < 0) {
    pcb->rtime = 0;
  }

  /* start an RTT measurement on this segment if none is in progress */
  if (pcb->rttest == 0) {
    pcb->rttest = tcp_ticks;
    pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno);

    LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq));
  }
  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n",
                                 lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) +
                                 seg->len));

  /* len is the offset of the TCP header inside the first pbuf; it is non-zero
     for retransmitted segments whose payload still starts at the IP header. */
  len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload);
  if (len == 0) {
    /* Exclude retransmitted segments from this count. */
    MIB2_STATS_INC(mib2.tcpoutsegs);
  }

  /* trim the pbuf so payload starts at the TCP header before sending */
  seg->p->len -= len;
  seg->p->tot_len -= len;

  seg->p->payload = seg->tcphdr;

  seg->tcphdr->chksum = 0;
#if CHECKSUM_GEN_TCP
  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
#if TCP_CHECKSUM_ON_COPY
    u32_t acc;
#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
    /* full recomputation used only to cross-check the incremental result */
    u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
                                         seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
    if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) {
      LWIP_ASSERT("data included but not checksummed",
                  seg->p->tot_len == (TCPH_HDRLEN(seg->tcphdr) * 4));
    }

    /* rebuild TCP header checksum (TCP header changes for retransmissions!) */
    acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP,
                                   seg->p->tot_len, TCPH_HDRLEN(seg->tcphdr) * 4, &pcb->local_ip, &pcb->remote_ip);
    /* add payload checksum */
    if (seg->chksum_swapped) {
      seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
      seg->chksum_swapped = 0;
    }
    acc += (u16_t)~(seg->chksum);
    seg->tcphdr->chksum = FOLD_U32T(acc);
#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
    if (chksum_slow != seg->tcphdr->chksum) {
      TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(
        ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n",
         seg->tcphdr->chksum, chksum_slow));
      seg->tcphdr->chksum = chksum_slow;
    }
#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
#else /* TCP_CHECKSUM_ON_COPY */
    seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
                                           seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
#endif /* TCP_CHECKSUM_ON_COPY */
  }
#endif /* CHECKSUM_GEN_TCP */
  TCP_STATS_INC(tcp.xmit);

  NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint));
  err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl,
                     pcb->tos, IP_PROTO_TCP, netif);
  NETIF_SET_HWADDRHINT(netif, NULL);
  return err;
}
1341 
/**
 * Send a TCP RESET packet (empty segment with RST flag set) either to
 * abort a connection or to show that there is no matching local connection
 * for a received segment.
 *
 * Since a RST segment is in most cases not sent for an active connection,
 * tcp_rst() takes explicit address/port/seqno arguments instead of a pcb.
 *
 * @param seqno the sequence number to use for the outgoing segment
 * @param ackno the acknowledge number to use for the outgoing segment
 * @param local_ip the local IP address to send the segment from
 * @param remote_ip the remote IP address to send the segment to
 * @param local_port the local TCP port to send the segment from
 * @param remote_port the remote TCP port to send the segment to
 */
void
tcp_rst(u32_t seqno, u32_t ackno,
        const ip_addr_t *local_ip, const ip_addr_t *remote_ip,
        u16_t local_port, u16_t remote_port)
{
  struct pbuf *p;
  struct tcp_hdr *tcphdr;
  struct netif *netif;
  p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM);
  if (p == NULL) {
    /* best-effort: a RST that cannot be allocated is simply dropped */
    LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n"));
    return;
  }
  LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
              (p->len >= sizeof(struct tcp_hdr)));

  tcphdr = (struct tcp_hdr *)p->payload;
  tcphdr->src = lwip_htons(local_port);
  tcphdr->dest = lwip_htons(remote_port);
  tcphdr->seqno = lwip_htonl(seqno);
  tcphdr->ackno = lwip_htonl(ackno);
  TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK);
#if LWIP_WND_SCALE
  tcphdr->wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF));
#else
  tcphdr->wnd = PP_HTONS(TCP_WND);
#endif
  tcphdr->chksum = 0;
  tcphdr->urgp = 0;

  TCP_STATS_INC(tcp.xmit);
  MIB2_STATS_INC(mib2.tcpoutrsts);

  netif = ip_route(local_ip, remote_ip);
  if (netif != NULL) {
#if CHECKSUM_GEN_TCP
    IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
      tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
                                        local_ip, remote_ip);
    }
#endif
    /* Send output with hardcoded TTL/HL since we have no access to the pcb */
    ip_output_if(p, local_ip, remote_ip, TCP_TTL, 0, IP_PROTO_TCP, netif);
  }
  pbuf_free(p);
  LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno));
}
1409 
/**
 * Requeue all unacked segments for retransmission
 *
 * Moves the whole unacked queue to the front of the unsent queue
 * (preserving order) and triggers tcp_output().
 *
 * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
 */
void
tcp_rexmit_rto(struct tcp_pcb *pcb)
{
  struct tcp_seg *seg;

  if (pcb->unacked == NULL) {
    return;
  }

  /* Move all unacked segments to the head of the unsent queue */
  for (seg = pcb->unacked; seg->next != NULL; seg = seg->next);
  /* concatenate unsent queue after unacked queue */
  seg->next = pcb->unsent;
#if TCP_OVERSIZE_DBGCHECK
  /* if last unsent changed, we need to update unsent_oversize */
  if (pcb->unsent == NULL) {
    pcb->unsent_oversize = seg->oversize_left;
  }
#endif /* TCP_OVERSIZE_DBGCHECK */
  /* unsent queue is the concatenated queue (of unacked, unsent) */
  pcb->unsent = pcb->unacked;
  /* unacked queue is now empty */
  pcb->unacked = NULL;

  /* increment number of retransmissions */
  if (pcb->nrtx < 0xFF) {
    ++pcb->nrtx;
  }

  /* Don't take any RTT measurements after retransmitting. */
  pcb->rttest = 0;

  /* Do the actual retransmission */
  tcp_output(pcb);
}
1452 
/**
 * Requeue the first unacked segment for retransmission
 *
 * Called for fast retransmit: moves only the head of the unacked queue
 * back into the unsent queue, inserting it in sequence-number order.
 * Does not call tcp_output() itself (see comment at the bottom).
 *
 * @param pcb the tcp_pcb for which to retransmit the first unacked segment
 */
void
tcp_rexmit(struct tcp_pcb *pcb)
{
  struct tcp_seg *seg;
  struct tcp_seg **cur_seg;

  if (pcb->unacked == NULL) {
    return;
  }

  /* Move the first unacked segment to the unsent queue */
  /* Keep the unsent queue sorted. */
  seg = pcb->unacked;
  pcb->unacked = seg->next;

  /* walk unsent until the insertion point that keeps seqnos ascending */
  cur_seg = &(pcb->unsent);
  while (*cur_seg &&
         TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
    cur_seg = &((*cur_seg)->next );
  }
  seg->next = *cur_seg;
  *cur_seg = seg;
#if TCP_OVERSIZE
  if (seg->next == NULL) {
    /* the retransmitted segment is last in unsent, so reset unsent_oversize */
    pcb->unsent_oversize = 0;
  }
#endif /* TCP_OVERSIZE */

  if (pcb->nrtx < 0xFF) {
    ++pcb->nrtx;
  }

  /* Don't take any rtt measurements after retransmitting. */
  pcb->rttest = 0;

  /* Do the actual retransmission. */
  MIB2_STATS_INC(mib2.tcpretranssegs);
  /* No need to call tcp_output: we are always called from tcp_input()
     and thus tcp_output directly returns. */
}
1501 
1502 
1508 void
1509 tcp_rexmit_fast(struct tcp_pcb *pcb)
1510 {
1511  if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) {
1512  /* This is fast retransmit. Retransmit the first unacked segment. */
1514  ("tcp_receive: dupacks %"U16_F" (%"U32_F
1515  "), fast retransmit %"U32_F"\n",
1516  (u16_t)pcb->dupacks, pcb->lastack,
1517  lwip_ntohl(pcb->unacked->tcphdr->seqno)));
1518  tcp_rexmit(pcb);
1519 
1520  /* Set ssthresh to half of the minimum of the current
1521  * cwnd and the advertised window */
1522  pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2;
1523 
1524  /* The minimum value for ssthresh should be 2 MSS */
1525  if (pcb->ssthresh < (2U * pcb->mss)) {
1527  ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F
1528  " should be min 2 mss %"U16_F"...\n",
1529  pcb->ssthresh, (u16_t)(2*pcb->mss)));
1530  pcb->ssthresh = 2*pcb->mss;
1531  }
1532 
1533  pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
1534  pcb->flags |= TF_INFR;
1535 
1536  /* Reset the retransmission timer to prevent immediate rto retransmissions */
1537  pcb->rtime = 0;
1538  }
1539 }
1540 
1541 
1550 err_t
1551 tcp_keepalive(struct tcp_pcb *pcb)
1552 {
1553  err_t err;
1554  struct pbuf *p;
1555  struct netif *netif;
1556 
1557  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to "));
1558  ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip);
1559  LWIP_DEBUGF(TCP_DEBUG, ("\n"));
1560 
1561  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1562  tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
1563 
1564  p = tcp_output_alloc_header(pcb, 0, 0, lwip_htonl(pcb->snd_nxt - 1));
1565  if (p == NULL) {
1567  ("tcp_keepalive: could not allocate memory for pbuf\n"));
1568  return ERR_MEM;
1569  }
1570  netif = ip_route(&pcb->local_ip, &pcb->remote_ip);
1571  if (netif == NULL) {
1572  err = ERR_RTE;
1573  } else {
1574 #if CHECKSUM_GEN_TCP
1575  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
1576  struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload;
1577  tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
1578  &pcb->local_ip, &pcb->remote_ip);
1579  }
1580 #endif /* CHECKSUM_GEN_TCP */
1581  TCP_STATS_INC(tcp.xmit);
1582 
1583  /* Send output to IP */
1584  NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint));
1585  err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP, netif);
1586  NETIF_SET_HWADDRHINT(netif, NULL);
1587  }
1588  pbuf_free(p);
1589 
1590  LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n",
1591  pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
1592  return err;
1593 }
1594 
1595 
1604 err_t
1605 tcp_zero_window_probe(struct tcp_pcb *pcb)
1606 {
1607  err_t err;
1608  struct pbuf *p;
1609  struct tcp_hdr *tcphdr;
1610  struct tcp_seg *seg;
1611  u16_t len;
1612  u8_t is_fin;
1613  u32_t snd_nxt;
1614  struct netif *netif;
1615 
1616  LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to "));
1617  ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip);
1618  LWIP_DEBUGF(TCP_DEBUG, ("\n"));
1619 
1621  ("tcp_zero_window_probe: tcp_ticks %"U32_F
1622  " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1623  tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
1624 
1625  seg = pcb->unacked;
1626 
1627  if (seg == NULL) {
1628  seg = pcb->unsent;
1629  }
1630  if (seg == NULL) {
1631  /* nothing to send, zero window probe not needed */
1632  return ERR_OK;
1633  }
1634 
1635  is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0);
1636  /* we want to send one seqno: either FIN or data (no options) */
1637  len = is_fin ? 0 : 1;
1638 
1639  p = tcp_output_alloc_header(pcb, 0, len, seg->tcphdr->seqno);
1640  if (p == NULL) {
1641  LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n"));
1642  return ERR_MEM;
1643  }
1644  tcphdr = (struct tcp_hdr *)p->payload;
1645 
1646  if (is_fin) {
1647  /* FIN segment, no data */
1648  TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN);
1649  } else {
1650  /* Data segment, copy in one byte from the head of the unacked queue */
1651  char *d = ((char *)p->payload + TCP_HLEN);
1652  /* Depending on whether the segment has already been sent (unacked) or not
1653  (unsent), seg->p->payload points to the IP header or TCP header.
1654  Ensure we copy the first TCP data byte: */
1655  pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len);
1656  }
1657 
1658  /* The byte may be acknowledged without the window being opened. */
1659  snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1;
1660  if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
1661  pcb->snd_nxt = snd_nxt;
1662  }
1663 
1664  netif = ip_route(&pcb->local_ip, &pcb->remote_ip);
1665  if (netif == NULL) {
1666  err = ERR_RTE;
1667  } else {
1668 #if CHECKSUM_GEN_TCP
1669  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
1670  tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
1671  &pcb->local_ip, &pcb->remote_ip);
1672  }
1673 #endif
1674  TCP_STATS_INC(tcp.xmit);
1675 
1676  /* Send output to IP */
1677  NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint));
1678  err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl,
1679  0, IP_PROTO_TCP, netif);
1680  NETIF_SET_HWADDRHINT(netif, NULL);
1681  }
1682 
1683  pbuf_free(p);
1684 
1685  LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F
1686  " ackno %"U32_F" err %d.\n",
1687  pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
1688  return err;
1689 }
1690 #endif /* LWIP_TCP */
u16_t tot_len
Definition: pbuf.h:175
Definition: pbuf.h:113
struct pbuf * next
Definition: pbuf.h:163
u16_t pbuf_clen(const struct pbuf *p)
Definition: pbuf.c:819
u16_t len
Definition: pbuf.h:178
Definition: err.h:86
Definition: pbuf.h:131
u8_t pbuf_header(struct pbuf *p, s16_t header_size_increment)
Definition: pbuf.c:684
#define TCP_QLEN_DEBUG
Definition: opt.h:2819
Definition: err.h:84
#define TCP_FR_DEBUG
Definition: opt.h:2776
#define TCP_RST_DEBUG
Definition: opt.h:2812
#define TCP_CWND_DEBUG
Definition: opt.h:2791
#define TCP_TTL
Definition: opt.h:1158
Definition: pbuf.h:199
#define TCP_DEBUG
Definition: opt.h:2762
u32_t sys_now(void)
Definition: sys_arch.cc:57
Definition: err.h:115
Definition: pbuf.h:161
#define SWAP_BYTES_IN_WORD(w)
Definition: inet_chksum.h:66
#define FOLD_U32T(u)
Definition: inet_chksum.h:71
Definition: netif.h:244
void pbuf_cat(struct pbuf *h, struct pbuf *t)
Definition: pbuf.c:859
pbuf_layer
Definition: pbuf.h:91
#define TCP_RTO_DEBUG
Definition: opt.h:2784
s8_t err_t
Definition: err.h:76
Definition: err.h:104
#define LWIP_DEBUGF(debug, message)
#define LWIP_MEM_ALIGN_SIZE(size)
Definition: arch.h:233
struct pbuf * pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
Definition: pbuf.c:267
u16_t pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
Definition: pbuf.c:1034
Definition: pbuf.h:127
#define LWIP_UNUSED_ARG(x)
Definition: arch.h:327
u8_t pbuf_free(struct pbuf *p)
Definition: pbuf.c:734
Definition: err.h:82
void * payload
Definition: pbuf.h:166
Definition: pbuf.h:99
Definition: err.h:90
u8_t type
Definition: pbuf.h:181
void * memp_malloc(memp_t type)
Definition: memp.c:404
#define TCP_SND_QUEUELEN
Definition: opt.h:1231
#define TCP_OUTPUT_DEBUG
Definition: opt.h:2805