1 /** @file
2
3 A brief file description
4
5 @section license License
6
7 Licensed to the Apache Software Foundation (ASF) under one
8 or more contributor license agreements. See the NOTICE file
9 distributed with this work for additional information
10 regarding copyright ownership. The ASF licenses this file
11 to you under the Apache License, Version 2.0 (the
12 "License"); you may not use this file except in compliance
13 with the License. You may obtain a copy of the License at
14
15 http://www.apache.org/licenses/LICENSE-2.0
16
17 Unless required by applicable law or agreed to in writing, software
18 distributed under the License is distributed on an "AS IS" BASIS,
19 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 See the License for the specific language governing permissions and
21 limitations under the License.
22 */
23
24 /*
25 * This file contains all the functions exported by the IOCore to the SDK.
26 * Any IOCore symbol accessed by a plugin directly should be called in this
27 * file to ensure that it gets exported as a global symbol in TS
28 */
29
30 #include "tscore/ink_platform.h"
31 #include "ts/ts.h"
32 #include "ts/InkAPIPrivateIOCore.h"
33 #if defined(solaris) && !defined(__GNUC__)
34 #include "P_EventSystem.h" // I_EventSystem.h
35 #include "P_Net.h" // I_Net.h
36 #else
37 #include "I_EventSystem.h"
38 #include "I_Net.h"
39 #endif
40 #include "I_Cache.h"
41 #include "I_HostDB.h"
42
43 // This assert is for internal API use only.
44 #if TS_USE_FAST_SDK
45 #define sdk_assert(EX) (void)(EX)
46 #else
47 #define sdk_assert(EX) ((void)((EX) ? (void)0 : _TSReleaseAssert(#EX, __FILE__, __LINE__)))
48 #endif
49
50 TSReturnCode
sdk_sanity_check_mutex(TSMutex mutex)51 sdk_sanity_check_mutex(TSMutex mutex)
52 {
53 if (mutex == nullptr) {
54 return TS_ERROR;
55 }
56
57 ProxyMutex *mutexp = reinterpret_cast<ProxyMutex *>(mutex);
58
59 if (mutexp->refcount() < 0) {
60 return TS_ERROR;
61 }
62 if (mutexp->nthread_holding < 0) {
63 return TS_ERROR;
64 }
65
66 return TS_SUCCESS;
67 }
68
69 TSReturnCode
sdk_sanity_check_hostlookup_structure(TSHostLookupResult data)70 sdk_sanity_check_hostlookup_structure(TSHostLookupResult data)
71 {
72 if (data == nullptr) {
73 return TS_ERROR;
74 }
75
76 return TS_SUCCESS;
77 }
78
79 TSReturnCode
sdk_sanity_check_iocore_structure(void * data)80 sdk_sanity_check_iocore_structure(void *data)
81 {
82 if (data == nullptr) {
83 return TS_ERROR;
84 }
85
86 return TS_SUCCESS;
87 }
88
89 // From InkAPI.cc
90 TSReturnCode sdk_sanity_check_continuation(TSCont cont);
91 TSReturnCode sdk_sanity_check_null_ptr(void const *ptr);
92
93 ////////////////////////////////////////////////////////////////////
94 //
95 // Threads
96 //
97 ////////////////////////////////////////////////////////////////////
// Dedicated EThread wrapper backing TSThread objects handed to plugins.
// Carries the plugin-supplied entry point/argument plus a mutex/condvar
// pair used to signal when the thread function has returned (see
// ink_thread_trampoline / TSThreadWait).
struct INKThreadInternal : public EThread {
  INKThreadInternal() : EThread(DEDICATED, -1)
  {
    ink_mutex_init(&completion.lock);
    ink_cond_init(&completion.signal);
  }

  ~INKThreadInternal() override
  {
    ink_mutex_destroy(&completion.lock);
    ink_cond_destroy(&completion.signal);
  }

  TSThreadFunc func = nullptr; // plugin entry point (stays nullptr for TSThreadInit threads)
  void *data = nullptr;        // opaque argument forwarded to func

  struct {
    ink_mutex lock;
    ink_cond signal;
    bool done = false; // set (under lock) once func has returned
  } completion;
};
120
// OS-thread entry shim: binds the INKThreadInternal to the new thread
// (set_specific), runs the plugin-supplied function, then publishes
// completion so TSThreadWait()/TSThreadDestroy() can proceed safely.
static void *
ink_thread_trampoline(void *data)
{
  void *retval;
  INKThreadInternal *ithread = static_cast<INKThreadInternal *>(data);

  ithread->set_specific();
  retval = ithread->func(ithread->data);

  ink_mutex_acquire(&ithread->completion.lock);

  // Flip the flag under the lock and wake any waiter in TSThreadWait().
  ithread->completion.done = true;
  ink_cond_broadcast(&ithread->completion.signal);

  ink_mutex_release(&ithread->completion.lock);
  return retval;
}
138
139 /*
140 * INKqa12653. Return TSThread or NULL if error
141 */
142 TSThread
TSThreadCreate(TSThreadFunc func,void * data)143 TSThreadCreate(TSThreadFunc func, void *data)
144 {
145 INKThreadInternal *thread;
146 ink_thread tid = 0;
147
148 thread = new INKThreadInternal;
149
150 ink_assert(thread->event_types == 0);
151 ink_assert(thread->mutex->thread_holding == thread);
152
153 thread->func = func;
154 thread->data = data;
155
156 ink_thread_create(&tid, ink_thread_trampoline, (void *)thread, 1, 0, nullptr);
157 if (!tid) {
158 return (TSThread) nullptr;
159 }
160
161 return reinterpret_cast<TSThread>(thread);
162 }
163
164 // Wait for a thread to complete. When a thread calls TSThreadCreate,
165 // it becomes the owner of the thread's mutex. Since only the thread
166 // that locked a mutex should be allowed to unlock it (a condition
167 // that is enforced for PTHREAD_MUTEX_ERRORCHECK), if the application
168 // needs to delete the thread, it must first wait for the thread to
169 // complete.
170 void
TSThreadWait(TSThread thread)171 TSThreadWait(TSThread thread)
172 {
173 sdk_assert(sdk_sanity_check_iocore_structure(thread) == TS_SUCCESS);
174 INKThreadInternal *ithread = reinterpret_cast<INKThreadInternal *>(thread);
175
176 ink_mutex_acquire(&ithread->completion.lock);
177
178 if (ithread->completion.done == false) {
179 ink_cond_wait(&ithread->completion.signal, &ithread->completion.lock);
180 }
181
182 ink_mutex_release(&ithread->completion.lock);
183 }
184
185 TSThread
TSThreadInit()186 TSThreadInit()
187 {
188 INKThreadInternal *thread;
189
190 thread = new INKThreadInternal;
191
192 #ifdef DEBUG
193 if (thread == nullptr) {
194 return (TSThread) nullptr;
195 }
196 #endif
197
198 thread->set_specific();
199
200 return reinterpret_cast<TSThread>(thread);
201 }
202
// Release a TSThread created by TSThreadCreate()/TSThreadInit().
void
TSThreadDestroy(TSThread thread)
{
  sdk_assert(sdk_sanity_check_iocore_structure(thread) == TS_SUCCESS);

  INKThreadInternal *ithread = reinterpret_cast<INKThreadInternal *>(thread);

  // The thread must be destroyed by the same thread that created
  // it because that thread is holding the thread mutex.
  ink_release_assert(ithread->mutex->thread_holding == ithread);

  // If this thread was created by TSThreadCreate() rather than
  // TSThreadInit(), then we must not destroy it before it's done
  // (func is only set on the TSThreadCreate() path).
  if (ithread->func) {
    ink_release_assert(ithread->completion.done == true);
  }

  delete ithread;
}
222
223 TSThread
TSThreadSelf(void)224 TSThreadSelf(void)
225 {
226 TSThread ithread = (TSThread)this_ethread();
227 return ithread;
228 }
229
230 TSEventThread
TSEventThreadSelf(void)231 TSEventThreadSelf(void)
232 {
233 return reinterpret_cast<TSEventThread>(this_event_thread());
234 }
235
236 ////////////////////////////////////////////////////////////////////
237 //
238 // Mutexes: For TSMutexCreate and TSMutexDestroy, the refcount of the
239 // ProxyMutex object is not incremented or decremented. If the resulting
240 // ProxyMutex is passed to a INKContInternal, it's mutex smart pointer
241 // will take ownership of the ProxyMutex and delete it when the last
242 // reference is removed. TSMutexDestroy should not be called in that case.
243 //
244 ////////////////////////////////////////////////////////////////////
245 TSMutex
TSMutexCreate()246 TSMutexCreate()
247 {
248 ProxyMutex *mutexp = new_ProxyMutex();
249
250 // TODO: Remove this when allocations can never fail.
251 sdk_assert(sdk_sanity_check_mutex((TSMutex)mutexp) == TS_SUCCESS);
252
253 return (TSMutex)mutexp;
254 }
255
256 void
TSMutexDestroy(TSMutex m)257 TSMutexDestroy(TSMutex m)
258 {
259 sdk_assert(sdk_sanity_check_mutex(m) == TS_SUCCESS);
260 ProxyMutex *mutexp = reinterpret_cast<ProxyMutex *>(m);
261
262 if (mutexp) {
263 ink_release_assert(mutexp->refcount() == 0);
264 mutexp->free();
265 }
266 }
267
268 /* The following two APIs are for Into work, actually, APIs of Mutex
269 should allow plugins to manually increase or decrease the refcount
270 of the mutex pointer, plugins may want more control of the creation
271 and destroy of the mutex.*/
272 TSMutex
TSMutexCreateInternal()273 TSMutexCreateInternal()
274 {
275 ProxyMutex *new_mutex = new_ProxyMutex();
276
277 // TODO: Remove this when allocations can never fail.
278 sdk_assert(sdk_sanity_check_mutex((TSMutex)new_mutex) == TS_SUCCESS);
279
280 new_mutex->refcount_inc();
281 return reinterpret_cast<TSMutex>(new_mutex);
282 }
283
284 int
TSMutexCheck(TSMutex mutex)285 TSMutexCheck(TSMutex mutex)
286 {
287 ProxyMutex *mutexp = (ProxyMutex *)mutex;
288
289 if (mutexp->refcount() < 0) {
290 return -1;
291 }
292 if (mutexp->nthread_holding < 0) {
293 return -1;
294 }
295 return 1;
296 }
297
298 void
TSMutexLock(TSMutex mutexp)299 TSMutexLock(TSMutex mutexp)
300 {
301 sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
302 ProxyMutex *proxy_mutex = reinterpret_cast<ProxyMutex *>(mutexp);
303 MUTEX_TAKE_LOCK(proxy_mutex, this_ethread());
304 }
305
306 TSReturnCode
TSMutexLockTry(TSMutex mutexp)307 TSMutexLockTry(TSMutex mutexp)
308 {
309 sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
310 ProxyMutex *proxy_mutex = reinterpret_cast<ProxyMutex *>(mutexp);
311 return (MUTEX_TAKE_TRY_LOCK(proxy_mutex, this_ethread()) ? TS_SUCCESS : TS_ERROR);
312 }
313
314 void
TSMutexUnlock(TSMutex mutexp)315 TSMutexUnlock(TSMutex mutexp)
316 {
317 sdk_assert(sdk_sanity_check_mutex(mutexp) == TS_SUCCESS);
318 ProxyMutex *proxy_mutex(reinterpret_cast<ProxyMutex *>(mutexp));
319 MUTEX_UNTAKE_LOCK(proxy_mutex, this_ethread());
320 }
321
322 /* VIOs */
323
324 void
TSVIOReenable(TSVIO viop)325 TSVIOReenable(TSVIO viop)
326 {
327 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
328
329 VIO *vio = (VIO *)viop;
330 vio->reenable();
331 }
332
333 TSIOBuffer
TSVIOBufferGet(TSVIO viop)334 TSVIOBufferGet(TSVIO viop)
335 {
336 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
337
338 VIO *vio = (VIO *)viop;
339 return reinterpret_cast<TSIOBuffer>(vio->get_writer());
340 }
341
342 TSIOBufferReader
TSVIOReaderGet(TSVIO viop)343 TSVIOReaderGet(TSVIO viop)
344 {
345 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
346
347 VIO *vio = (VIO *)viop;
348 return reinterpret_cast<TSIOBufferReader>(vio->get_reader());
349 }
350
351 int64_t
TSVIONBytesGet(TSVIO viop)352 TSVIONBytesGet(TSVIO viop)
353 {
354 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
355
356 VIO *vio = (VIO *)viop;
357 return vio->nbytes;
358 }
359
360 void
TSVIONBytesSet(TSVIO viop,int64_t nbytes)361 TSVIONBytesSet(TSVIO viop, int64_t nbytes)
362 {
363 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
364 sdk_assert(nbytes >= 0);
365
366 VIO *vio = (VIO *)viop;
367 vio->nbytes = nbytes;
368 }
369
370 int64_t
TSVIONDoneGet(TSVIO viop)371 TSVIONDoneGet(TSVIO viop)
372 {
373 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
374
375 VIO *vio = (VIO *)viop;
376 return vio->ndone;
377 }
378
379 void
TSVIONDoneSet(TSVIO viop,int64_t ndone)380 TSVIONDoneSet(TSVIO viop, int64_t ndone)
381 {
382 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
383 sdk_assert(ndone >= 0);
384
385 VIO *vio = (VIO *)viop;
386 vio->ndone = ndone;
387 }
388
389 int64_t
TSVIONTodoGet(TSVIO viop)390 TSVIONTodoGet(TSVIO viop)
391 {
392 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
393
394 VIO *vio = (VIO *)viop;
395 return vio->ntodo();
396 }
397
398 TSCont
TSVIOContGet(TSVIO viop)399 TSVIOContGet(TSVIO viop)
400 {
401 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
402
403 VIO *vio = (VIO *)viop;
404 return (TSCont)vio->cont;
405 }
406
407 TSVConn
TSVIOVConnGet(TSVIO viop)408 TSVIOVConnGet(TSVIO viop)
409 {
410 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
411
412 VIO *vio = (VIO *)viop;
413 return (TSVConn)vio->vc_server;
414 }
415
416 TSMutex
TSVIOMutexGet(TSVIO viop)417 TSVIOMutexGet(TSVIO viop)
418 {
419 sdk_assert(sdk_sanity_check_iocore_structure(viop) == TS_SUCCESS);
420
421 VIO *vio = (VIO *)viop;
422 return reinterpret_cast<TSMutex>(vio->mutex.get());
423 }
424
425 /* High Resolution Time */
426
427 ink_hrtime
INKBasedTimeGet()428 INKBasedTimeGet()
429 {
430 return Thread::get_hrtime();
431 }
432
433 /* UDP Connection Interface */
434
// Bind a UDP endpoint at (ip, port) on behalf of contp; the continuation
// is called back with the result of the bind.
// NOTE(review): port is converted with htons() while ip is passed through
// unchanged — confirm the expected byte order of 'ip' with callers.
TSAction
INKUDPBind(TSCont contp, unsigned int ip, int port)
{
  sdk_assert(sdk_sanity_check_continuation(contp) == TS_SUCCESS);

  FORCE_PLUGIN_SCOPED_MUTEX(contp);

  struct sockaddr_in addr;
  ats_ip4_set(&addr, ip, htons(port));

  // -1 and the two MTU-size constants are forwarded to UDPBind as-is
  // (presumably fd and send/receive buffer sizes — see the UDPNet API).
  return reinterpret_cast<TSAction>(
    udpNet.UDPBind((Continuation *)contp, ats_ip_sa_cast(&addr), -1, INK_ETHERNET_MTU_SIZE, INK_ETHERNET_MTU_SIZE));
}
448
// Queue a UDP datagram of 'len' bytes from 'data' to (ip, port) on the
// given connection; contp is called back with the send result.
TSAction
INKUDPSendTo(TSCont contp, INKUDPConn udp, unsigned int ip, int port, char *data, int64_t len)
{
  sdk_assert(sdk_sanity_check_continuation(contp) == TS_SUCCESS);

  FORCE_PLUGIN_SCOPED_MUTEX(contp);
  UDPPacket *packet = new_UDPPacket();
  UDPConnection *conn = (UDPConnection *)udp;

  ats_ip4_set(&packet->to, ip, htons(port));

  // Payload is staged in a single 32K IOBufferBlock.
  IOBufferBlock *blockp = new_IOBufferBlock();
  blockp->alloc(BUFFER_SIZE_INDEX_32K);

  // NOTE(review): oversized payloads are silently truncated to 32K - 1
  // bytes rather than rejected — confirm this is the intended contract.
  if (len > index_to_buffer_size(BUFFER_SIZE_INDEX_32K)) {
    len = index_to_buffer_size(BUFFER_SIZE_INDEX_32K) - 1;
  }

  memcpy(blockp->start(), data, len);
  blockp->fill(len);

  packet->append_block((IOBufferBlock *)blockp);
  /* (Jinsheng 11/27/00) set connection twice which causes:
     FATAL: ../../../proxy/iocore/UDPPacket.h:136:
     failed assert `!m_conn` */

  /* packet->setConnection ((UDPConnection *)udp); */
  return reinterpret_cast<TSAction>(conn->send((Continuation *)contp, packet));
}
478
479 TSAction
INKUDPRecvFrom(TSCont contp,INKUDPConn udp)480 INKUDPRecvFrom(TSCont contp, INKUDPConn udp)
481 {
482 sdk_assert(sdk_sanity_check_continuation(contp) == TS_SUCCESS);
483
484 FORCE_PLUGIN_SCOPED_MUTEX(contp);
485 UDPConnection *conn = (UDPConnection *)udp;
486 return reinterpret_cast<TSAction>(conn->recv((Continuation *)contp));
487 }
488
489 int
INKUDPConnFdGet(INKUDPConn udp)490 INKUDPConnFdGet(INKUDPConn udp)
491 {
492 UDPConnection *conn = (UDPConnection *)udp;
493 return conn->getFd();
494 }
495
496 /* UDP Packet */
497 INKUDPPacket
INKUDPPacketCreate()498 INKUDPPacketCreate()
499 {
500 UDPPacket *packet = new_UDPPacket();
501 return ((INKUDPPacket)packet);
502 }
503
504 TSIOBufferBlock
INKUDPPacketBufferBlockGet(INKUDPPacket packet)505 INKUDPPacketBufferBlockGet(INKUDPPacket packet)
506 {
507 sdk_assert(sdk_sanity_check_null_ptr((void *)packet) == TS_SUCCESS);
508
509 UDPPacket *p = (UDPPacket *)packet;
510 return ((TSIOBufferBlock)p->getIOBlockChain());
511 }
512
513 unsigned int
INKUDPPacketFromAddressGet(INKUDPPacket packet)514 INKUDPPacketFromAddressGet(INKUDPPacket packet)
515 {
516 sdk_assert(sdk_sanity_check_null_ptr((void *)packet) == TS_SUCCESS);
517
518 UDPPacket *p = (UDPPacket *)packet;
519 return ats_ip4_addr_cast(&p->from);
520 }
521
522 int
INKUDPPacketFromPortGet(INKUDPPacket packet)523 INKUDPPacketFromPortGet(INKUDPPacket packet)
524 {
525 sdk_assert(sdk_sanity_check_null_ptr((void *)packet) == TS_SUCCESS);
526
527 UDPPacket *p = (UDPPacket *)packet;
528 return ats_ip_port_host_order(&p->from);
529 }
530
531 INKUDPConn
INKUDPPacketConnGet(INKUDPPacket packet)532 INKUDPPacketConnGet(INKUDPPacket packet)
533 {
534 sdk_assert(sdk_sanity_check_null_ptr((void *)packet) == TS_SUCCESS);
535
536 UDPPacket *p = (UDPPacket *)packet;
537 return ((INKUDPConn)p->getConnection());
538 }
539
540 void
INKUDPPacketDestroy(INKUDPPacket packet)541 INKUDPPacketDestroy(INKUDPPacket packet)
542 {
543 sdk_assert(sdk_sanity_check_null_ptr((void *)packet) == TS_SUCCESS);
544
545 UDPPacket *p = (UDPPacket *)packet;
546 p->free();
547 }
548
549 /* Packet Queue */
550
551 INKUDPPacket
INKUDPPacketGet(INKUDPacketQueue queuep)552 INKUDPPacketGet(INKUDPacketQueue queuep)
553 {
554 if (queuep != nullptr) {
555 UDPPacket *packet;
556 Queue<UDPPacket> *qp = (Queue<UDPPacket> *)queuep;
557
558 packet = qp->pop();
559 return (packet);
560 }
561
562 return nullptr;
563 }
564
565 /* Buffers */
566
567 TSIOBuffer
TSIOBufferCreate()568 TSIOBufferCreate()
569 {
570 MIOBuffer *b = new_empty_MIOBuffer(BUFFER_SIZE_INDEX_32K);
571
572 // TODO: Should remove this when memory allocations can't fail.
573 sdk_assert(sdk_sanity_check_iocore_structure(b) == TS_SUCCESS);
574 return reinterpret_cast<TSIOBuffer>(b);
575 }
576
577 TSIOBuffer
TSIOBufferSizedCreate(TSIOBufferSizeIndex index)578 TSIOBufferSizedCreate(TSIOBufferSizeIndex index)
579 {
580 sdk_assert((index >= TS_IOBUFFER_SIZE_INDEX_128) && (index <= TS_IOBUFFER_SIZE_INDEX_32K));
581
582 MIOBuffer *b = new_MIOBuffer(index);
583
584 // TODO: Should remove this when memory allocations can't fail.
585 sdk_assert(sdk_sanity_check_iocore_structure(b) == TS_SUCCESS);
586 return reinterpret_cast<TSIOBuffer>(b);
587 }
588
589 void
TSIOBufferDestroy(TSIOBuffer bufp)590 TSIOBufferDestroy(TSIOBuffer bufp)
591 {
592 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
593 free_MIOBuffer((MIOBuffer *)bufp);
594 }
595
// Return the buffer's current write block, appending a fresh block first
// if there is none or the current one has no writable space left.
TSIOBufferBlock
TSIOBufferStart(TSIOBuffer bufp)
{
  sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);

  MIOBuffer *b = (MIOBuffer *)bufp;
  IOBufferBlock *blk = b->get_current_block();

  // Guarantee a block with write_avail() > 0 before handing it out.
  if (!blk || (blk->write_avail() == 0)) {
    b->add_block();
  }
  blk = b->get_current_block();

  // TODO: Remove when memory allocations can't fail.
  sdk_assert(sdk_sanity_check_null_ptr((void *)blk) == TS_SUCCESS);

  return (TSIOBufferBlock)blk;
}
614
615 int64_t
TSIOBufferCopy(TSIOBuffer bufp,TSIOBufferReader readerp,int64_t length,int64_t offset)616 TSIOBufferCopy(TSIOBuffer bufp, TSIOBufferReader readerp, int64_t length, int64_t offset)
617 {
618 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
619 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
620 sdk_assert((length >= 0) && (offset >= 0));
621
622 MIOBuffer *b = (MIOBuffer *)bufp;
623 IOBufferReader *r = (IOBufferReader *)readerp;
624
625 return b->write(r, length, offset);
626 }
627
628 int64_t
TSIOBufferWrite(TSIOBuffer bufp,const void * buf,int64_t length)629 TSIOBufferWrite(TSIOBuffer bufp, const void *buf, int64_t length)
630 {
631 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
632 sdk_assert(sdk_sanity_check_null_ptr((void *)buf) == TS_SUCCESS);
633 sdk_assert(length >= 0);
634
635 MIOBuffer *b = (MIOBuffer *)bufp;
636 return b->write(buf, length);
637 }
638
639 int64_t
TSIOBufferReaderCopy(TSIOBufferReader readerp,void * buf,int64_t length)640 TSIOBufferReaderCopy(TSIOBufferReader readerp, void *buf, int64_t length)
641 {
642 auto r{reinterpret_cast<IOBufferReader *>(readerp)};
643 char *limit = r->memcpy(buf, length, 0);
644 return limit - static_cast<char *>(buf);
645 }
646
647 void
TSIOBufferProduce(TSIOBuffer bufp,int64_t nbytes)648 TSIOBufferProduce(TSIOBuffer bufp, int64_t nbytes)
649 {
650 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
651 sdk_assert(nbytes >= 0);
652
653 MIOBuffer *b = (MIOBuffer *)bufp;
654 b->fill(nbytes);
655 }
656
657 // dev API, not exposed
658 void
TSIOBufferBlockDestroy(TSIOBufferBlock blockp)659 TSIOBufferBlockDestroy(TSIOBufferBlock blockp)
660 {
661 sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
662
663 IOBufferBlock *blk = (IOBufferBlock *)blockp;
664 blk->free();
665 }
666
667 TSIOBufferBlock
TSIOBufferBlockNext(TSIOBufferBlock blockp)668 TSIOBufferBlockNext(TSIOBufferBlock blockp)
669 {
670 sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
671
672 IOBufferBlock *blk = (IOBufferBlock *)blockp;
673 return (TSIOBufferBlock)(blk->next.get());
674 }
675
676 // dev API, not exposed
677 int64_t
TSIOBufferBlockDataSizeGet(TSIOBufferBlock blockp)678 TSIOBufferBlockDataSizeGet(TSIOBufferBlock blockp)
679 {
680 sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
681
682 IOBufferBlock *blk = (IOBufferBlock *)blockp;
683 return (blk->read_avail());
684 }
685
// Start of the readable data in 'blockp' as seen by 'readerp'; optionally
// reports the number of readable bytes through 'avail' (may be nullptr).
const char *
TSIOBufferBlockReadStart(TSIOBufferBlock blockp, TSIOBufferReader readerp, int64_t *avail)
{
  sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
  sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);

  IOBufferBlock *blk = (IOBufferBlock *)blockp;
  IOBufferReader *reader = (IOBufferReader *)readerp;
  char *p;

  p = blk->start();
  if (avail) {
    *avail = blk->read_avail();
  }

  // If the reader currently points at this very block, part of it has
  // already been consumed: advance past the reader's start_offset and
  // shrink the reported availability to match (clamped at zero).
  if (reader->block.get() == blk) {
    p += reader->start_offset;
    if (avail) {
      *avail -= reader->start_offset;
      if (*avail < 0) {
        *avail = 0;
      }
    }
  }

  return (const char *)p;
}
713
// Number of bytes in 'blockp' still readable from the perspective of
// 'readerp'.
int64_t
TSIOBufferBlockReadAvail(TSIOBufferBlock blockp, TSIOBufferReader readerp)
{
  sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
  sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);

  IOBufferBlock *blk = (IOBufferBlock *)blockp;
  IOBufferReader *reader = (IOBufferReader *)readerp;
  int64_t avail;

  avail = blk->read_avail();

  // If the reader points at this block, it has already consumed
  // start_offset bytes of it; discount those (clamped at zero).
  if (reader->block.get() == blk) {
    avail -= reader->start_offset;
    if (avail < 0) {
      avail = 0;
    }
  }

  return avail;
}
735
736 char *
TSIOBufferBlockWriteStart(TSIOBufferBlock blockp,int64_t * avail)737 TSIOBufferBlockWriteStart(TSIOBufferBlock blockp, int64_t *avail)
738 {
739 sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
740
741 IOBufferBlock *blk = (IOBufferBlock *)blockp;
742
743 if (avail) {
744 *avail = blk->write_avail();
745 }
746 return blk->end();
747 }
748
749 int64_t
TSIOBufferBlockWriteAvail(TSIOBufferBlock blockp)750 TSIOBufferBlockWriteAvail(TSIOBufferBlock blockp)
751 {
752 sdk_assert(sdk_sanity_check_iocore_structure(blockp) == TS_SUCCESS);
753
754 IOBufferBlock *blk = (IOBufferBlock *)blockp;
755 return blk->write_avail();
756 }
757
758 int64_t
TSIOBufferWaterMarkGet(TSIOBuffer bufp)759 TSIOBufferWaterMarkGet(TSIOBuffer bufp)
760 {
761 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
762
763 MIOBuffer *b = (MIOBuffer *)bufp;
764 return b->water_mark;
765 }
766
767 void
TSIOBufferWaterMarkSet(TSIOBuffer bufp,int64_t water_mark)768 TSIOBufferWaterMarkSet(TSIOBuffer bufp, int64_t water_mark)
769 {
770 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
771 sdk_assert(water_mark >= 0);
772
773 MIOBuffer *b = (MIOBuffer *)bufp;
774 b->water_mark = water_mark;
775 }
776
777 TSIOBufferReader
TSIOBufferReaderAlloc(TSIOBuffer bufp)778 TSIOBufferReaderAlloc(TSIOBuffer bufp)
779 {
780 sdk_assert(sdk_sanity_check_iocore_structure(bufp) == TS_SUCCESS);
781
782 MIOBuffer *b = (MIOBuffer *)bufp;
783 TSIOBufferReader readerp = (TSIOBufferReader)b->alloc_reader();
784
785 // TODO: Should remove this when memory allocation can't fail.
786 sdk_assert(sdk_sanity_check_null_ptr((void *)readerp) == TS_SUCCESS);
787 return readerp;
788 }
789
790 TSIOBufferReader
TSIOBufferReaderClone(TSIOBufferReader readerp)791 TSIOBufferReaderClone(TSIOBufferReader readerp)
792 {
793 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
794
795 IOBufferReader *r = (IOBufferReader *)readerp;
796 return (TSIOBufferReader)r->clone();
797 }
798
799 void
TSIOBufferReaderFree(TSIOBufferReader readerp)800 TSIOBufferReaderFree(TSIOBufferReader readerp)
801 {
802 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
803
804 IOBufferReader *r = (IOBufferReader *)readerp;
805 r->mbuf->dealloc_reader(r);
806 }
807
808 TSIOBufferBlock
TSIOBufferReaderStart(TSIOBufferReader readerp)809 TSIOBufferReaderStart(TSIOBufferReader readerp)
810 {
811 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
812
813 IOBufferReader *r = (IOBufferReader *)readerp;
814
815 if (r->block) {
816 r->skip_empty_blocks();
817 }
818
819 return reinterpret_cast<TSIOBufferBlock>(r->get_current_block());
820 }
821
822 void
TSIOBufferReaderConsume(TSIOBufferReader readerp,int64_t nbytes)823 TSIOBufferReaderConsume(TSIOBufferReader readerp, int64_t nbytes)
824 {
825 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
826 sdk_assert(nbytes >= 0);
827
828 IOBufferReader *r = (IOBufferReader *)readerp;
829 r->consume(nbytes);
830 }
831
832 int64_t
TSIOBufferReaderAvail(TSIOBufferReader readerp)833 TSIOBufferReaderAvail(TSIOBufferReader readerp)
834 {
835 sdk_assert(sdk_sanity_check_iocore_structure(readerp) == TS_SUCCESS);
836
837 IOBufferReader *r = (IOBufferReader *)readerp;
838 return r->read_avail();
839 }
840