/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
18
19 #include <grpc/support/port_platform.h>
20
21 #include "src/core/lib/iomgr/port.h"
22
23 #include <limits.h>
24 #include <string.h>
25
26 #include <grpc/slice_buffer.h>
27
28 #include <grpc/support/alloc.h>
29 #include <grpc/support/log.h>
30 #include <grpc/support/string_util.h>
31
32 #include "src/core/lib/iomgr/error.h"
33 #include "src/core/lib/iomgr/iomgr_custom.h"
34 #include "src/core/lib/iomgr/resource_quota.h"
35 #include "src/core/lib/iomgr/tcp_client.h"
36 #include "src/core/lib/iomgr/tcp_custom.h"
37 #include "src/core/lib/iomgr/tcp_server.h"
38 #include "src/core/lib/slice/slice_internal.h"
39 #include "src/core/lib/slice/slice_string_helpers.h"
40
/* Size (in bytes) of the single slice allocated for each pending read. */
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192

extern grpc_core::TraceFlag grpc_tcp_trace;

/* Socket operations supplied by the embedding environment; set exactly once
   by grpc_custom_endpoint_init() and consulted by every endpoint below. */
grpc_socket_vtable* grpc_custom_socket_vtable = nullptr;
extern grpc_tcp_server_vtable custom_tcp_server_vtable;
extern grpc_tcp_client_vtable custom_tcp_client_vtable;

/* Installs |impl| as the custom socket implementation and routes TCP
   client/server creation through the custom vtables. */
void grpc_custom_endpoint_init(grpc_socket_vtable* impl) {
  grpc_custom_socket_vtable = impl;
  grpc_set_tcp_client_impl(&custom_tcp_client_vtable);
  grpc_set_tcp_server_impl(&custom_tcp_server_vtable);
}
54
/* State for one custom TCP endpoint.  The struct is allocated in
   custom_tcp_endpoint_create() and freed in tcp_free() once the last
   TCP_REF/TCP_UNREF reference is dropped. */
struct custom_tcp_endpoint {
  grpc_endpoint base;  // must be first: grpc_endpoint* is cast to this type
  gpr_refcount refcount;       // lifetime of this struct (TCP_REF/TCP_UNREF)
  grpc_custom_socket* socket;  // underlying socket; shares its own ref count

  grpc_closure* read_cb;   // pending read closure; nullptr when no read active
  grpc_closure* write_cb;  // pending write closure; nullptr when none active

  grpc_slice_buffer* read_slices;   // caller-owned buffer for the pending read
  grpc_slice_buffer* write_slices;  // caller-owned buffer for the pending write

  grpc_resource_user* resource_user;
  grpc_resource_user_slice_allocator slice_allocator;  // allocates read slices

  bool shutting_down;  // set by endpoint_shutdown; never cleared

  char* peer_string;  // owned copy; freed in tcp_free
};
tcp_free(grpc_custom_socket * s)73 static void tcp_free(grpc_custom_socket* s) {
74 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)s->endpoint;
75 grpc_resource_user_unref(tcp->resource_user);
76 gpr_free(tcp->peer_string);
77 gpr_free(tcp);
78 s->refs--;
79 if (s->refs == 0) {
80 grpc_custom_socket_vtable->destroy(s);
81 gpr_free(s);
82 }
83 }
84
#ifndef NDEBUG
/* Debug builds log every ref/unref with the call site, to help track down
   reference leaks.  Release builds compile down to plain gpr_ref/gpr_unref. */
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(custom_tcp_endpoint* tcp, const char* reason,
                      const char* file, int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
            "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
            val, val - 1);
  }
  if (gpr_unref(&tcp->refcount)) {
    /* Last reference dropped: free the endpoint (and maybe the socket). */
    tcp_free(tcp->socket);
  }
}

static void tcp_ref(custom_tcp_endpoint* tcp, const char* reason,
                    const char* file, int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
            "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
            val, val + 1);
  }
  gpr_ref(&tcp->refcount);
}
#else
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(custom_tcp_endpoint* tcp) {
  if (gpr_unref(&tcp->refcount)) {
    /* Last reference dropped: free the endpoint (and maybe the socket). */
    tcp_free(tcp->socket);
  }
}

static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
#endif
122
call_read_cb(custom_tcp_endpoint * tcp,grpc_error * error)123 static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
124 grpc_closure* cb = tcp->read_cb;
125 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
126 gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
127 cb->cb_arg);
128 size_t i;
129 const char* str = grpc_error_string(error);
130 gpr_log(GPR_INFO, "read: error=%s", str);
131
132 for (i = 0; i < tcp->read_slices->count; i++) {
133 char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
134 GPR_DUMP_HEX | GPR_DUMP_ASCII);
135 gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
136 gpr_free(dump);
137 }
138 }
139 TCP_UNREF(tcp, "read");
140 tcp->read_slices = nullptr;
141 tcp->read_cb = nullptr;
142 grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
143 }
144
custom_read_callback(grpc_custom_socket * socket,size_t nread,grpc_error * error)145 static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
146 grpc_error* error) {
147 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
148 grpc_core::ExecCtx exec_ctx;
149 grpc_slice_buffer garbage;
150 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
151 if (error == GRPC_ERROR_NONE && nread == 0) {
152 error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
153 }
154 if (error == GRPC_ERROR_NONE) {
155 // Successful read
156 if ((size_t)nread < tcp->read_slices->length) {
157 /* TODO(murgatroid99): Instead of discarding the unused part of the read
158 * buffer, reuse it as the next read buffer. */
159 grpc_slice_buffer_init(&garbage);
160 grpc_slice_buffer_trim_end(
161 tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
162 grpc_slice_buffer_reset_and_unref_internal(&garbage);
163 }
164 } else {
165 grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
166 }
167 call_read_cb(tcp, error);
168 }
169
/* Invoked when the slice allocator has produced (or failed to produce) the
   single read slice.  On success, issues the actual read on the socket; on
   failure, completes the pending read immediately with the error. */
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
  custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
            grpc_error_string(error));
  }
  if (error == GRPC_ERROR_NONE) {
    /* Before calling read, we allocate a buffer with exactly one slice
     * to tcp->read_slices and wait for the callback indicating that the
     * allocation was successful. So slices[0] should always exist here */
    char* buffer = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
    size_t len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
    grpc_custom_socket_vtable->read(tcp->socket, buffer, len,
                                    custom_read_callback);
  } else {
    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
  }
  /* NOTE(review): this trace runs after the read was initiated (or after the
   * error path completed the read); it reports the outcome of starting it. */
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    const char* str = grpc_error_string(error);
    gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str);
  }
}
193
endpoint_read(grpc_endpoint * ep,grpc_slice_buffer * read_slices,grpc_closure * cb,bool)194 static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
195 grpc_closure* cb, bool /*urgent*/) {
196 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
197 GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
198 GPR_ASSERT(tcp->read_cb == nullptr);
199 tcp->read_cb = cb;
200 tcp->read_slices = read_slices;
201 grpc_slice_buffer_reset_and_unref_internal(read_slices);
202 TCP_REF(tcp, "read");
203 if (grpc_resource_user_alloc_slices(&tcp->slice_allocator,
204 GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
205 tcp->read_slices)) {
206 tcp_read_allocation_done(tcp, GRPC_ERROR_NONE);
207 }
208 }
209
custom_write_callback(grpc_custom_socket * socket,grpc_error * error)210 static void custom_write_callback(grpc_custom_socket* socket,
211 grpc_error* error) {
212 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
213 grpc_core::ExecCtx exec_ctx;
214 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
215 grpc_closure* cb = tcp->write_cb;
216 tcp->write_cb = nullptr;
217 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
218 const char* str = grpc_error_string(error);
219 gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str);
220 }
221 TCP_UNREF(tcp, "write");
222 grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
223 }
224
endpoint_write(grpc_endpoint * ep,grpc_slice_buffer * write_slices,grpc_closure * cb,void *)225 static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
226 grpc_closure* cb, void* /*arg*/) {
227 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
228 GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
229
230 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
231 size_t j;
232
233 for (j = 0; j < write_slices->count; j++) {
234 char* data = grpc_dump_slice(write_slices->slices[j],
235 GPR_DUMP_HEX | GPR_DUMP_ASCII);
236 gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string,
237 data);
238 gpr_free(data);
239 }
240 }
241
242 if (tcp->shutting_down) {
243 grpc_core::ExecCtx::Run(
244 DEBUG_LOCATION, cb,
245 GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP socket is shutting down"));
246 return;
247 }
248
249 GPR_ASSERT(tcp->write_cb == nullptr);
250 tcp->write_slices = write_slices;
251 GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
252 if (tcp->write_slices->count == 0) {
253 // No slices means we don't have to do anything,
254 // and libuv doesn't like empty writes
255 grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
256 return;
257 }
258 tcp->write_cb = cb;
259 TCP_REF(tcp, "write");
260 grpc_custom_socket_vtable->write(tcp->socket, tcp->write_slices,
261 custom_write_callback);
262 }
263
endpoint_add_to_pollset(grpc_endpoint * ep,grpc_pollset * pollset)264 static void endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
265 // No-op. We're ignoring pollsets currently
266 (void)ep;
267 (void)pollset;
268 }
269
endpoint_add_to_pollset_set(grpc_endpoint * ep,grpc_pollset_set * pollset)270 static void endpoint_add_to_pollset_set(grpc_endpoint* ep,
271 grpc_pollset_set* pollset) {
272 // No-op. We're ignoring pollsets currently
273 (void)ep;
274 (void)pollset;
275 }
276
endpoint_delete_from_pollset_set(grpc_endpoint * ep,grpc_pollset_set * pollset)277 static void endpoint_delete_from_pollset_set(grpc_endpoint* ep,
278 grpc_pollset_set* pollset) {
279 // No-op. We're ignoring pollsets currently
280 (void)ep;
281 (void)pollset;
282 }
283
/* Initiates shutdown: marks the endpoint as shutting down (failing any
   future writes), then shuts down the resource user and the underlying
   socket.  Idempotent — only the first call has any effect.  Consumes
   the reference on |why|. */
static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
  custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
  if (!tcp->shutting_down) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      const char* str = grpc_error_string(why);
      gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str);
    }
    tcp->shutting_down = true;
    // grpc_core::ExecCtx::Run(DEBUG_LOCATION,tcp->read_cb,
    // GRPC_ERROR_REF(why));
    // grpc_core::ExecCtx::Run(DEBUG_LOCATION,tcp->write_cb,
    // GRPC_ERROR_REF(why)); tcp->read_cb = nullptr; tcp->write_cb = nullptr;
    grpc_resource_user_shutdown(tcp->resource_user);
    grpc_custom_socket_vtable->shutdown(tcp->socket);
  }
  GRPC_ERROR_UNREF(why);
}
301
custom_close_callback(grpc_custom_socket * socket)302 static void custom_close_callback(grpc_custom_socket* socket) {
303 socket->refs--;
304 if (socket->refs == 0) {
305 grpc_custom_socket_vtable->destroy(socket);
306 gpr_free(socket);
307 } else if (socket->endpoint) {
308 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
309 grpc_core::ExecCtx exec_ctx;
310 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
311 TCP_UNREF(tcp, "destroy");
312 }
313 }
314
endpoint_destroy(grpc_endpoint * ep)315 static void endpoint_destroy(grpc_endpoint* ep) {
316 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
317 grpc_custom_socket_vtable->close(tcp->socket, custom_close_callback);
318 }
319
endpoint_get_peer(grpc_endpoint * ep)320 static char* endpoint_get_peer(grpc_endpoint* ep) {
321 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
322 return gpr_strdup(tcp->peer_string);
323 }
324
endpoint_get_resource_user(grpc_endpoint * ep)325 static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
326 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
327 return tcp->resource_user;
328 }
329
/* Custom endpoints are not backed by a file descriptor. */
static int endpoint_get_fd(grpc_endpoint* /*ep*/) { return -1; }
331
/* Error tracking is not supported by the custom endpoint implementation. */
static bool endpoint_can_track_err(grpc_endpoint* /*ep*/) { return false; }
333
/* grpc_endpoint vtable wiring the custom TCP implementation above. */
static grpc_endpoint_vtable vtable = {endpoint_read,
                                      endpoint_write,
                                      endpoint_add_to_pollset,
                                      endpoint_add_to_pollset_set,
                                      endpoint_delete_from_pollset_set,
                                      endpoint_shutdown,
                                      endpoint_destroy,
                                      endpoint_get_resource_user,
                                      endpoint_get_peer,
                                      endpoint_get_fd,
                                      endpoint_can_track_err};
345
custom_tcp_endpoint_create(grpc_custom_socket * socket,grpc_resource_quota * resource_quota,char * peer_string)346 grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
347 grpc_resource_quota* resource_quota,
348 char* peer_string) {
349 custom_tcp_endpoint* tcp =
350 (custom_tcp_endpoint*)gpr_malloc(sizeof(custom_tcp_endpoint));
351 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
352 grpc_core::ExecCtx exec_ctx;
353
354 if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
355 gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
356 }
357 memset(tcp, 0, sizeof(custom_tcp_endpoint));
358 socket->refs++;
359 socket->endpoint = (grpc_endpoint*)tcp;
360 tcp->socket = socket;
361 tcp->base.vtable = &vtable;
362 gpr_ref_init(&tcp->refcount, 1);
363 tcp->peer_string = gpr_strdup(peer_string);
364 tcp->shutting_down = false;
365 tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
366 grpc_resource_user_slice_allocator_init(
367 &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
368
369 return &tcp->base;
370 }
371