1 /** @file
2
3 A brief file description
4
5 @section license License
6
7 Licensed to the Apache Software Foundation (ASF) under one
8 or more contributor license agreements. See the NOTICE file
9 distributed with this work for additional information
10 regarding copyright ownership. The ASF licenses this file
11 to you under the Apache License, Version 2.0 (the
12 "License"); you may not use this file except in compliance
13 with the License. You may obtain a copy of the License at
14
15 http://www.apache.org/licenses/LICENSE-2.0
16
17 Unless required by applicable law or agreed to in writing, software
18 distributed under the License is distributed on an "AS IS" BASIS,
19 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 See the License for the specific language governing permissions and
21 limitations under the License.
22 */
23
24 #pragma once
25
26 #include "tscore/ink_platform.h"
27 #include "tscore/ink_resource.h"
28
29 // TODO: I think we're overly aggressive here on making MIOBuffer 64-bit
30 // but not sure it's worthwhile changing anything to 32-bit honestly.
31
32 //////////////////////////////////////////////////////////////
33 //
34 // returns 0 for DEFAULT_BUFFER_BASE_SIZE,
35 // +1 for each power of 2
36 //
37 //////////////////////////////////////////////////////////////
38 TS_INLINE int64_t
buffer_size_to_index(int64_t size,int64_t max)39 buffer_size_to_index(int64_t size, int64_t max)
40 {
41 int64_t r = max;
42
43 while (r && BUFFER_SIZE_FOR_INDEX(r - 1) >= size) {
44 r--;
45 }
46 return r;
47 }
48
49 TS_INLINE int64_t
iobuffer_size_to_index(int64_t size,int64_t max)50 iobuffer_size_to_index(int64_t size, int64_t max)
51 {
52 if (size > BUFFER_SIZE_FOR_INDEX(max)) {
53 return BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size);
54 }
55 return buffer_size_to_index(size, max);
56 }
57
58 TS_INLINE int64_t
index_to_buffer_size(int64_t idx)59 index_to_buffer_size(int64_t idx)
60 {
61 if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(idx)) {
62 return BUFFER_SIZE_FOR_INDEX(idx);
63 } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(idx)) {
64 return BUFFER_SIZE_FOR_XMALLOC(idx);
65 // coverity[dead_error_condition]
66 } else if (BUFFER_SIZE_INDEX_IS_CONSTANT(idx)) {
67 return BUFFER_SIZE_FOR_CONSTANT(idx);
68 }
69 // coverity[dead_error_line]
70 return 0;
71 }
72
TS_INLINE IOBufferBlock *
iobufferblock_clone(IOBufferBlock *src, int64_t offset, int64_t len)
{
  // Clone up to @a len bytes of the block chain starting @a offset bytes
  // into @a src. Returns the head of the new chain (nullptr if nothing was
  // cloned). Clones share the underlying IOBufferData with the originals.
  IOBufferBlock *start_buf = nullptr;
  IOBufferBlock *current_buf = nullptr;

  while (src && len >= 0) {
    char *start = src->_start;
    char *end = src->_end;
    int64_t max_bytes = end - start;

    // Skip blocks that end before the requested offset, consuming the
    // offset by the amount each skipped block covered.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      src = src->next.get();
      continue;
    }

    // Bytes to take from this block: the remaining length, capped by
    // what this block actually holds past the offset.
    int64_t bytes = len;
    if (bytes >= max_bytes) {
      bytes = max_bytes;
    }

    IOBufferBlock *new_buf = src->clone();
    new_buf->_start += offset;
    // Close the clone's write window so nothing can be appended to it.
    new_buf->_buf_end = new_buf->_end = new_buf->_start + bytes;

    // Link the clone onto the tail of the result chain.
    if (!start_buf) {
      start_buf = new_buf;
      current_buf = start_buf;
    } else {
      current_buf->next = new_buf;
      current_buf = new_buf;
    }

    len -= bytes;
    src = src->next.get();
    offset = 0; // the offset only applies to the first cloned block
  }

  return start_buf;
}
115
TS_INLINE IOBufferBlock *
iobufferblock_skip(IOBufferBlock *b, int64_t *poffset, int64_t *plen, int64_t write)
{
  // Advance (block, offset) past @a write bytes of readable data and
  // debit *plen by the same amount. Returns the block containing the new
  // position (or nullptr if the chain was exhausted), with *poffset set
  // to the offset within that block.
  int64_t offset = *poffset;
  int64_t len = write;

  while (b && len >= 0) {
    int64_t max_bytes = b->read_avail();

    // If this block ends before the start offset, skip it
    // and adjust the offset to consume its length.
    max_bytes -= offset;
    if (max_bytes <= 0) {
      offset = -max_bytes;
      b = b->next.get();
      continue;
    }

    if (len >= max_bytes) {
      // Consume this whole block and keep going.
      b = b->next.get();
      len -= max_bytes;
      offset = 0;
    } else {
      // The target position lands inside this block.
      offset = offset + len;
      break;
    }
  }

  *poffset = offset;
  *plen -= write;
  return b;
}
148
149 TS_INLINE void
iobuffer_mem_inc(const char * _loc,int64_t _size_index)150 iobuffer_mem_inc(const char *_loc, int64_t _size_index)
151 {
152 if (!res_track_memory) {
153 return;
154 }
155
156 if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
157 return;
158 }
159
160 if (!_loc) {
161 _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
162 }
163 ResourceTracker::increment(_loc, index_to_buffer_size(_size_index));
164 }
165
166 TS_INLINE void
iobuffer_mem_dec(const char * _loc,int64_t _size_index)167 iobuffer_mem_dec(const char *_loc, int64_t _size_index)
168 {
169 if (!res_track_memory) {
170 return;
171 }
172
173 if (!BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
174 return;
175 }
176 if (!_loc) {
177 _loc = "memory/IOBuffer/UNKNOWN-LOCATION";
178 }
179 ResourceTracker::increment(_loc, -index_to_buffer_size(_size_index));
180 }
181
182 //////////////////////////////////////////////////////////////////
183 //
184 // inline functions definitions
185 //
186 //////////////////////////////////////////////////////////////////
187 //////////////////////////////////////////////////////////////////
188 //
189 // class IOBufferData --
190 // inline functions definitions
191 //
192 //////////////////////////////////////////////////////////////////
193 TS_INLINE int64_t
block_size()194 IOBufferData::block_size()
195 {
196 return index_to_buffer_size(_size_index);
197 }
198
199 TS_INLINE IOBufferData *
new_IOBufferData_internal(const char * location,void * b,int64_t size,int64_t asize_index)200 new_IOBufferData_internal(const char *location, void *b, int64_t size, int64_t asize_index)
201 {
202 (void)size;
203 IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
204 d->_size_index = asize_index;
205 ink_assert(BUFFER_SIZE_INDEX_IS_CONSTANT(asize_index) || size <= d->block_size());
206 d->_location = location;
207 d->_data = (char *)b;
208 return d;
209 }
210
211 TS_INLINE IOBufferData *
new_xmalloc_IOBufferData_internal(const char * location,void * b,int64_t size)212 new_xmalloc_IOBufferData_internal(const char *location, void *b, int64_t size)
213 {
214 return new_IOBufferData_internal(location, b, size, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(size));
215 }
216
217 TS_INLINE IOBufferData *
new_IOBufferData_internal(const char * loc,int64_t size_index,AllocType type)218 new_IOBufferData_internal(const char *loc, int64_t size_index, AllocType type)
219 {
220 IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
221 d->_location = loc;
222 d->alloc(size_index, type);
223 return d;
224 }
225
226 // IRIX has a compiler bug which prevents this function
227 // from being compiled correctly at -O3
228 // so it is DUPLICATED in IOBuffer.cc
229 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
TS_INLINE void
IOBufferData::alloc(int64_t size_index, AllocType type)
{
  // Allocate (or re-allocate) backing memory for this descriptor. Any
  // existing buffer is released first. NOTE: this function is duplicated
  // in IOBuffer.cc (see comment above) — keep both copies in sync.
  if (_data) {
    dealloc();
  }
  _size_index = size_index;
  _mem_type = type;
  // Record the allocation before the actual memory is obtained.
  iobuffer_mem_inc(_location, size_index);
  switch (type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
      // coverity[dead_error_condition]
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      // Page-aligned allocation for the memaligned heap path.
      _data = (char *)ats_memalign(ats_pagesize(), index_to_buffer_size(size_index));
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(size_index)) {
      _data = (char *)ioBufAllocator[size_index].alloc_void();
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(size_index)) {
      _data = (char *)ats_malloc(BUFFER_SIZE_FOR_XMALLOC(size_index));
    }
    break;
  }
}
258
259 // ****** IF YOU CHANGE THIS FUNCTION change that one as well.
260
TS_INLINE void
IOBufferData::dealloc()
{
  // Return the buffer to its source (pool or heap) and reset this
  // descriptor to the unallocated state. The free path must mirror the
  // alloc() path above — see the keep-in-sync warning there.
  iobuffer_mem_dec(_location, _size_index);
  switch (_mem_type) {
  case MEMALIGNED:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      // Memory came from ats_memalign(); released with plain free().
      ::free((void *)_data);
    }
    break;
  default:
  case DEFAULT_ALLOC:
    if (BUFFER_SIZE_INDEX_IS_FAST_ALLOCATED(_size_index)) {
      ioBufAllocator[_size_index].free_void(_data);
    } else if (BUFFER_SIZE_INDEX_IS_XMALLOCED(_size_index)) {
      ats_free(_data);
    }
    break;
  }
  _data = nullptr;
  _size_index = BUFFER_SIZE_NOT_ALLOCATED;
  _mem_type = NO_ALLOC;
}
286
287 TS_INLINE void
free()288 IOBufferData::free()
289 {
290 dealloc();
291 THREAD_FREE(this, ioDataAllocator, this_thread());
292 }
293
294 //////////////////////////////////////////////////////////////////
295 //
296 // class IOBufferBlock --
297 // inline functions definitions
298 //
299 //////////////////////////////////////////////////////////////////
300 TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(const char * location)301 new_IOBufferBlock_internal(const char *location)
302 {
303 IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
304 b->_location = location;
305 return b;
306 }
307
308 TS_INLINE IOBufferBlock *
new_IOBufferBlock_internal(const char * location,IOBufferData * d,int64_t len,int64_t offset)309 new_IOBufferBlock_internal(const char *location, IOBufferData *d, int64_t len, int64_t offset)
310 {
311 IOBufferBlock *b = THREAD_ALLOC(ioBlockAllocator, this_thread());
312 b->_location = location;
313 b->set(d, len, offset);
314 return b;
315 }
316
317 TS_INLINE
IOBufferBlock()318 IOBufferBlock::IOBufferBlock()
319 {
320 return;
321 }
322
323 TS_INLINE void
consume(int64_t len)324 IOBufferBlock::consume(int64_t len)
325 {
326 _start += len;
327 ink_assert(_start <= _end);
328 }
329
330 TS_INLINE void
fill(int64_t len)331 IOBufferBlock::fill(int64_t len)
332 {
333 _end += len;
334 ink_assert(_end <= _buf_end);
335 }
336
337 TS_INLINE void
reset()338 IOBufferBlock::reset()
339 {
340 _end = _start = buf();
341 _buf_end = buf() + data->block_size();
342 }
343
344 TS_INLINE void
alloc(int64_t i)345 IOBufferBlock::alloc(int64_t i)
346 {
347 ink_assert(BUFFER_SIZE_ALLOCATED(i));
348 data = new_IOBufferData_internal(_location, i);
349 reset();
350 }
351
TS_INLINE void
IOBufferBlock::clear()
{
  // Detach this block from its data and successors. The chain is freed
  // iteratively (not via Ptr destructors) to avoid deep recursive
  // destruction of long chains.
  data = nullptr;

  IOBufferBlock *p = next.get();
  while (p) {
    // If our block pointer refcount dropped to zero,
    // recursively free the list.
    if (p->refcount_dec() == 0) {
      IOBufferBlock *n = p->next.detach();
      p->free();
      p = n;
    } else {
      // We don't hold the last refcount, so we are done.
      break;
    }
  }

  // Nuke the next pointer without dropping the refcount
  // because we already manually did that.
  next.detach();

  _buf_end = _end = _start = nullptr;
}
377
378 TS_INLINE IOBufferBlock *
clone()379 IOBufferBlock::clone() const
380 {
381 IOBufferBlock *b = new_IOBufferBlock_internal(_location);
382 b->data = data;
383 b->_start = _start;
384 b->_end = _end;
385 b->_buf_end = _end;
386 b->_location = _location;
387 return b;
388 }
389
390 TS_INLINE void
dealloc()391 IOBufferBlock::dealloc()
392 {
393 clear();
394 }
395
396 TS_INLINE void
free()397 IOBufferBlock::free()
398 {
399 dealloc();
400 THREAD_FREE(this, ioBlockAllocator, this_thread());
401 }
402
TS_INLINE void
IOBufferBlock::set_internal(void *b, int64_t len, int64_t asize_index)
{
  // Wrap external memory @a b of length @a len in this block. The data
  // descriptor is created unallocated and then pointed at @a b directly,
  // bypassing IOBufferData::alloc().
  data = new_IOBufferData_internal(_location, BUFFER_SIZE_NOT_ALLOCATED);
  data->_data = (char *)b;
  // Account the memory manually since alloc() was bypassed.
  iobuffer_mem_inc(_location, asize_index);
  data->_size_index = asize_index;
  reset();
  // Mark the wrapped bytes as already written.
  _end = _start + len;
}
413
414 TS_INLINE void
set(IOBufferData * d,int64_t len,int64_t offset)415 IOBufferBlock::set(IOBufferData *d, int64_t len, int64_t offset)
416 {
417 data = d;
418 _start = buf() + offset;
419 _end = _start + len;
420 _buf_end = buf() + d->block_size();
421 }
422
423 //////////////////////////////////////////////////////////////////
424 //
425 // class IOBufferReader --
426 // inline functions definitions
427 //
428 //////////////////////////////////////////////////////////////////
TS_INLINE void
IOBufferReader::skip_empty_blocks()
{
  // Advance past blocks this reader has fully consumed, folding each
  // skipped block's size into start_offset. Only advances while a
  // non-empty successor exists, so `block` never becomes null.
  while (block->next && block->next->read_avail() && start_offset >= block->size()) {
    start_offset -= block->size();
    block = block->next;
  }
}
437
438 TS_INLINE bool
low_water()439 IOBufferReader::low_water()
440 {
441 return mbuf->low_water();
442 }
443
444 TS_INLINE bool
high_water()445 IOBufferReader::high_water()
446 {
447 return read_avail() >= mbuf->water_mark;
448 }
449
450 TS_INLINE bool
current_low_water()451 IOBufferReader::current_low_water()
452 {
453 return mbuf->current_low_water();
454 }
455
456 TS_INLINE IOBufferBlock *
get_current_block()457 IOBufferReader::get_current_block()
458 {
459 return block.get();
460 }
461
462 TS_INLINE char *
start()463 IOBufferReader::start()
464 {
465 if (!block) {
466 return nullptr;
467 }
468
469 skip_empty_blocks();
470 return block->start() + start_offset;
471 }
472
473 TS_INLINE char *
end()474 IOBufferReader::end()
475 {
476 if (!block) {
477 return nullptr;
478 }
479
480 skip_empty_blocks();
481 return block->end();
482 }
483
484 TS_INLINE int64_t
block_read_avail()485 IOBufferReader::block_read_avail()
486 {
487 if (!block) {
488 return 0;
489 }
490
491 skip_empty_blocks();
492 return (int64_t)(block->end() - (block->start() + start_offset));
493 }
494
495 inline std::string_view
block_read_view()496 IOBufferReader::block_read_view()
497 {
498 const char *start = this->start(); // empty blocks are skipped in here.
499 return start ? std::string_view{start, static_cast<size_t>(block->end() - start)} : std::string_view{};
500 }
501
502 TS_INLINE int
block_count()503 IOBufferReader::block_count()
504 {
505 int count = 0;
506 IOBufferBlock *b = block.get();
507
508 while (b) {
509 count++;
510 b = b->next.get();
511 }
512
513 return count;
514 }
515
516 TS_INLINE int64_t
read_avail()517 IOBufferReader::read_avail()
518 {
519 int64_t t = 0;
520 IOBufferBlock *b = block.get();
521
522 while (b) {
523 t += b->read_avail();
524 b = b->next.get();
525 }
526
527 t -= start_offset;
528 if (size_limit != INT64_MAX && t > size_limit) {
529 t = size_limit;
530 }
531
532 return t;
533 }
534
535 TS_INLINE bool
is_read_avail_more_than(int64_t size)536 IOBufferReader::is_read_avail_more_than(int64_t size)
537 {
538 int64_t t = -start_offset;
539 IOBufferBlock *b = block.get();
540
541 while (b) {
542 t += b->read_avail();
543 if (t > size) {
544 return true;
545 }
546 b = b->next.get();
547 }
548 return false;
549 }
550
TS_INLINE void
IOBufferReader::consume(int64_t n)
{
  // Consume @a n bytes: bump the reader offset, charge the size limit,
  // then normalize by advancing over fully-consumed blocks.
  start_offset += n;
  if (size_limit != INT64_MAX) {
    size_limit -= n;
  }

  ink_assert(size_limit >= 0);
  if (!block) {
    return;
  }

  int64_t r = block->read_avail();
  int64_t s = start_offset;
  // Advance while the offset covers this entire block and a non-empty
  // successor exists, keeping start_offset relative to `block`.
  while (r <= s && block->next && block->next->read_avail()) {
    s -= r;
    start_offset = s;
    block = block->next;
    r = block->read_avail();
  }
}
573
574 TS_INLINE char &
575 IOBufferReader::operator[](int64_t i)
576 {
577 static char default_ret = '\0'; // This is just to avoid compiler warnings...
578 IOBufferBlock *b = block.get();
579
580 i += start_offset;
581 while (b) {
582 int64_t bytes = b->read_avail();
583 if (bytes > i) {
584 return b->start()[i];
585 }
586 i -= bytes;
587 b = b->next.get();
588 }
589
590 ink_release_assert(!"out of range");
591 // Never used, just to satisfy compilers not understanding the fatality of ink_release_assert().
592 return default_ret;
593 }
594
595 TS_INLINE void
clear()596 IOBufferReader::clear()
597 {
598 accessor = nullptr;
599 block = nullptr;
600 mbuf = nullptr;
601 start_offset = 0;
602 size_limit = INT64_MAX;
603 }
604
605 TS_INLINE void
reset()606 IOBufferReader::reset()
607 {
608 block = mbuf->_writer;
609 start_offset = 0;
610 size_limit = INT64_MAX;
611 }
612
613 ////////////////////////////////////////////////////////////////
614 //
615 // class MIOBuffer --
616 // inline functions definitions
617 //
618 ////////////////////////////////////////////////////////////////
619 inkcoreapi extern ClassAllocator<MIOBuffer> ioAllocator;
620 ////////////////////////////////////////////////////////////////
621 //
622 // MIOBuffer::MIOBuffer()
623 //
624 // This constructor accepts a pre-allocated memory buffer,
// wraps it in IOBufferData and IOBufferBlock structures
626 // and sets it as the current block.
627 // NOTE that in this case the memory buffer will not be freed
628 // by the MIOBuffer class. It is the user responsibility to
// free the memory buffer. The wrappers (IOBufferBlock and
// IOBufferData) will be freed by this class.
631 //
632 ////////////////////////////////////////////////////////////////
633 TS_INLINE
MIOBuffer(void * b,int64_t bufsize,int64_t aWater_mark)634 MIOBuffer::MIOBuffer(void *b, int64_t bufsize, int64_t aWater_mark)
635 {
636 _location = nullptr;
637 set(b, bufsize);
638 water_mark = aWater_mark;
639 size_index = BUFFER_SIZE_NOT_ALLOCATED;
640 return;
641 }
642
643 TS_INLINE
MIOBuffer(int64_t default_size_index)644 MIOBuffer::MIOBuffer(int64_t default_size_index)
645 {
646 clear();
647 size_index = default_size_index;
648 _location = nullptr;
649 return;
650 }
651
652 TS_INLINE
MIOBuffer()653 MIOBuffer::MIOBuffer()
654 {
655 clear();
656 _location = nullptr;
657 return;
658 }
659
660 TS_INLINE
~MIOBuffer()661 MIOBuffer::~MIOBuffer()
662 {
663 _writer = nullptr;
664 dealloc_all_readers();
665 }
666
667 TS_INLINE MIOBuffer *
new_MIOBuffer_internal(const char * location,int64_t size_index)668 new_MIOBuffer_internal(const char *location, int64_t size_index)
669 {
670 MIOBuffer *b = THREAD_ALLOC(ioAllocator, this_thread());
671 b->_location = location;
672 b->alloc(size_index);
673 b->water_mark = 0;
674 return b;
675 }
676
677 TS_INLINE void
free_MIOBuffer(MIOBuffer * mio)678 free_MIOBuffer(MIOBuffer *mio)
679 {
680 mio->_writer = nullptr;
681 mio->dealloc_all_readers();
682 THREAD_FREE(mio, ioAllocator, this_thread());
683 }
684
685 TS_INLINE MIOBuffer *
new_empty_MIOBuffer_internal(const char * location,int64_t size_index)686 new_empty_MIOBuffer_internal(const char *location, int64_t size_index)
687 {
688 MIOBuffer *b = THREAD_ALLOC(ioAllocator, this_thread());
689 b->size_index = size_index;
690 b->water_mark = 0;
691 b->_location = location;
692 return b;
693 }
694
695 TS_INLINE void
free_empty_MIOBuffer(MIOBuffer * mio)696 free_empty_MIOBuffer(MIOBuffer *mio)
697 {
698 THREAD_FREE(mio, ioAllocator, this_thread());
699 }
700
701 TS_INLINE IOBufferReader *
alloc_accessor(MIOBufferAccessor * anAccessor)702 MIOBuffer::alloc_accessor(MIOBufferAccessor *anAccessor)
703 {
704 int i;
705 for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
706 if (!readers[i].allocated()) {
707 break;
708 }
709 }
710
711 // TODO refactor code to return nullptr at some point
712 ink_release_assert(i < MAX_MIOBUFFER_READERS);
713
714 IOBufferReader *e = &readers[i];
715 e->mbuf = this;
716 e->reset();
717 e->accessor = anAccessor;
718
719 return e;
720 }
721
722 TS_INLINE IOBufferReader *
alloc_reader()723 MIOBuffer::alloc_reader()
724 {
725 int i;
726 for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
727 if (!readers[i].allocated()) {
728 break;
729 }
730 }
731
732 // TODO refactor code to return nullptr at some point
733 ink_release_assert(i < MAX_MIOBUFFER_READERS);
734
735 IOBufferReader *e = &readers[i];
736 e->mbuf = this;
737 e->reset();
738 e->accessor = nullptr;
739
740 return e;
741 }
742
743 TS_INLINE int64_t
block_size()744 MIOBuffer::block_size()
745 {
746 return index_to_buffer_size(size_index);
747 }
748 TS_INLINE IOBufferReader *
clone_reader(IOBufferReader * r)749 MIOBuffer::clone_reader(IOBufferReader *r)
750 {
751 int i;
752 for (i = 0; i < MAX_MIOBUFFER_READERS; i++) {
753 if (!readers[i].allocated()) {
754 break;
755 }
756 }
757
758 // TODO refactor code to return nullptr at some point
759 ink_release_assert(i < MAX_MIOBUFFER_READERS);
760
761 IOBufferReader *e = &readers[i];
762 e->mbuf = this;
763 e->accessor = nullptr;
764 e->block = r->block;
765 e->start_offset = r->start_offset;
766 e->size_limit = r->size_limit;
767 ink_assert(e->size_limit >= 0);
768
769 return e;
770 }
771
772 TS_INLINE int64_t
block_write_avail()773 MIOBuffer::block_write_avail()
774 {
775 IOBufferBlock *b = first_write_block();
776 return b ? b->write_avail() : 0;
777 }
778
779 ////////////////////////////////////////////////////////////////
780 //
781 // MIOBuffer::append_block()
782 //
783 // Appends a block to writer->next and make it the current
784 // block.
785 // Note that the block is not appended to the end of the list.
786 // That means that if writer->next was not null before this
787 // call then the block that writer->next was pointing to will
788 // have its reference count decremented and writer->next
789 // will have a new value which is the new block.
790 // In any case the new appended block becomes the current
791 // block.
792 //
793 ////////////////////////////////////////////////////////////////
TS_INLINE void
MIOBuffer::append_block_internal(IOBufferBlock *b)
{
  // Attach @a b as _writer->next and make the appropriate block current.
  // See the comment block above for the replace-not-append semantics.
  // It would be nice to remove an empty buffer at the beginning,
  // but this breaks HTTP.
  // if (!_writer || !_writer->read_avail())
  if (!_writer) {
    // First block ever: it becomes the writer and readers start on it.
    _writer = b;
    init_readers();
  } else {
    // Any block currently at _writer->next must be empty; it is replaced.
    ink_assert(!_writer->next || !_writer->next->read_avail());
    _writer->next = b;
    // Advance the writer over any pre-filled blocks in the new chain.
    while (b->read_avail()) {
      _writer = b;
      b = b->next.get();
      if (!b) {
        break;
      }
    }
  }
  // Skip over full blocks so the writer ends on one with write space
  // (or the last block before empty successors).
  while (_writer->next && !_writer->write_avail() && _writer->next->read_avail()) {
    _writer = _writer->next;
  }
}
818
819 TS_INLINE void
append_block(IOBufferBlock * b)820 MIOBuffer::append_block(IOBufferBlock *b)
821 {
822 ink_assert(b->read_avail());
823 append_block_internal(b);
824 }
825
826 ////////////////////////////////////////////////////////////////
827 //
828 // MIOBuffer::append_block()
829 //
830 // Allocate a block, appends it to current->next
831 // and make the new block the current block (writer).
832 //
833 ////////////////////////////////////////////////////////////////
834 TS_INLINE void
append_block(int64_t asize_index)835 MIOBuffer::append_block(int64_t asize_index)
836 {
837 ink_assert(BUFFER_SIZE_ALLOCATED(asize_index));
838 IOBufferBlock *b = new_IOBufferBlock_internal(_location);
839 b->alloc(asize_index);
840 append_block_internal(b);
841 return;
842 }
843
844 TS_INLINE void
add_block()845 MIOBuffer::add_block()
846 {
847 if (this->_writer == nullptr || this->_writer->next == nullptr) {
848 append_block(size_index);
849 }
850 }
851
852 TS_INLINE void
check_add_block()853 MIOBuffer::check_add_block()
854 {
855 if (!high_water() && current_low_water()) {
856 add_block();
857 }
858 }
859
860 TS_INLINE IOBufferBlock *
get_current_block()861 MIOBuffer::get_current_block()
862 {
863 return first_write_block();
864 }
865
866 //////////////////////////////////////////////////////////////////
867 //
868 // MIOBuffer::current_write_avail()
869 //
870 // returns the total space available in all blocks.
871 // This function is different than write_avail() because
872 // it will not append a new block if there is no space
873 // or below the watermark space available.
874 //
875 //////////////////////////////////////////////////////////////////
876 TS_INLINE int64_t
current_write_avail()877 MIOBuffer::current_write_avail()
878 {
879 int64_t t = 0;
880 IOBufferBlock *b = _writer.get();
881 while (b) {
882 t += b->write_avail();
883 b = b->next.get();
884 }
885 return t;
886 }
887
888 //////////////////////////////////////////////////////////////////
889 //
890 // MIOBuffer::write_avail()
891 //
892 // returns the number of bytes available in the current block.
893 // If there is no current block or not enough free space in
894 // the current block then a new block is appended.
895 //
896 //////////////////////////////////////////////////////////////////
897 TS_INLINE int64_t
write_avail()898 MIOBuffer::write_avail()
899 {
900 check_add_block();
901 return current_write_avail();
902 }
903
TS_INLINE void
MIOBuffer::fill(int64_t len)
{
  // Mark @a len bytes of the write chain as written, spilling across
  // blocks as needed. Assumes the chain already has at least @a len
  // bytes of write space (e.g. guaranteed via write_avail()); otherwise
  // the advance to _writer->next can walk off the chain.
  int64_t f = _writer->write_avail();
  while (f < len) {
    // Fill the current block completely and move to the next one.
    _writer->fill(f);
    len -= f;
    if (len > 0) {
      _writer = _writer->next;
    }
    f = _writer->write_avail();
  }
  _writer->fill(len);
}
918
919 TS_INLINE int
max_block_count()920 MIOBuffer::max_block_count()
921 {
922 int maxb = 0;
923 for (auto &reader : readers) {
924 if (reader.allocated()) {
925 int c = reader.block_count();
926 if (c > maxb) {
927 maxb = c;
928 }
929 }
930 }
931 return maxb;
932 }
933
934 TS_INLINE int64_t
max_read_avail()935 MIOBuffer::max_read_avail()
936 {
937 int64_t s = 0;
938 int found = 0;
939 for (auto &reader : readers) {
940 if (reader.allocated()) {
941 int64_t ss = reader.read_avail();
942 if (ss > s) {
943 s = ss;
944 }
945 found = 1;
946 }
947 }
948 if (!found && _writer) {
949 return _writer->read_avail();
950 }
951 return s;
952 }
953
954 TS_INLINE void
set(void * b,int64_t len)955 MIOBuffer::set(void *b, int64_t len)
956 {
957 _writer = new_IOBufferBlock_internal(_location);
958 _writer->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_CONSTANT_SIZE(len));
959 init_readers();
960 }
961
962 TS_INLINE void
append_xmalloced(void * b,int64_t len)963 MIOBuffer::append_xmalloced(void *b, int64_t len)
964 {
965 IOBufferBlock *x = new_IOBufferBlock_internal(_location);
966 x->set_internal(b, len, BUFFER_SIZE_INDEX_FOR_XMALLOC_SIZE(len));
967 append_block_internal(x);
968 }
969
970 TS_INLINE void
append_fast_allocated(void * b,int64_t len,int64_t fast_size_index)971 MIOBuffer::append_fast_allocated(void *b, int64_t len, int64_t fast_size_index)
972 {
973 IOBufferBlock *x = new_IOBufferBlock_internal(_location);
974 x->set_internal(b, len, fast_size_index);
975 append_block_internal(x);
976 }
977
978 TS_INLINE void
alloc(int64_t i)979 MIOBuffer::alloc(int64_t i)
980 {
981 _writer = new_IOBufferBlock_internal(_location);
982 _writer->alloc(i);
983 size_index = i;
984 init_readers();
985 }
986
987 TS_INLINE void
dealloc_reader(IOBufferReader * e)988 MIOBuffer::dealloc_reader(IOBufferReader *e)
989 {
990 if (e->accessor) {
991 ink_assert(e->accessor->writer() == this);
992 ink_assert(e->accessor->reader() == e);
993 e->accessor->clear();
994 }
995 e->clear();
996 }
997
998 TS_INLINE IOBufferReader *
clone()999 IOBufferReader::clone()
1000 {
1001 return mbuf->clone_reader(this);
1002 }
1003
1004 TS_INLINE void
dealloc()1005 IOBufferReader::dealloc()
1006 {
1007 mbuf->dealloc_reader(this);
1008 }
1009
1010 TS_INLINE void
dealloc_all_readers()1011 MIOBuffer::dealloc_all_readers()
1012 {
1013 for (auto &reader : readers) {
1014 if (reader.allocated()) {
1015 dealloc_reader(&reader);
1016 }
1017 }
1018 }
1019
1020 TS_INLINE void
reader_for(MIOBuffer * abuf)1021 MIOBufferAccessor::reader_for(MIOBuffer *abuf)
1022 {
1023 mbuf = abuf;
1024 if (abuf) {
1025 entry = mbuf->alloc_accessor(this);
1026 } else {
1027 entry = nullptr;
1028 }
1029 }
1030
1031 TS_INLINE void
reader_for(IOBufferReader * areader)1032 MIOBufferAccessor::reader_for(IOBufferReader *areader)
1033 {
1034 if (entry == areader) {
1035 return;
1036 }
1037 mbuf = areader->mbuf;
1038 entry = areader;
1039 ink_assert(mbuf);
1040 }
1041
1042 TS_INLINE void
writer_for(MIOBuffer * abuf)1043 MIOBufferAccessor::writer_for(MIOBuffer *abuf)
1044 {
1045 mbuf = abuf;
1046 entry = nullptr;
1047 }
1048
TS_INLINE
MIOBufferAccessor::~MIOBufferAccessor() {} // intentionally empty; members clean up themselves
1051