1 /*
2 *
3 * Copyright 2015-2016 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19 #include <grpc/support/port_platform.h>
20
21 #include "src/core/lib/surface/server.h"
22
23 #include <limits.h>
24 #include <stdlib.h>
25 #include <string.h>
26
27 #include <grpc/support/alloc.h>
28 #include <grpc/support/log.h>
29 #include <grpc/support/string_util.h>
30
31 #include <utility>
32
33 #include "src/core/lib/channel/channel_args.h"
34 #include "src/core/lib/channel/channelz.h"
35 #include "src/core/lib/channel/connected_channel.h"
36 #include "src/core/lib/debug/stats.h"
37 #include "src/core/lib/gpr/mpscq.h"
38 #include "src/core/lib/gpr/spinlock.h"
39 #include "src/core/lib/gpr/string.h"
40 #include "src/core/lib/iomgr/executor.h"
41 #include "src/core/lib/iomgr/iomgr.h"
42 #include "src/core/lib/slice/slice_internal.h"
43 #include "src/core/lib/surface/api_trace.h"
44 #include "src/core/lib/surface/call.h"
45 #include "src/core/lib/surface/channel.h"
46 #include "src/core/lib/surface/completion_queue.h"
47 #include "src/core/lib/surface/init.h"
48 #include "src/core/lib/transport/metadata.h"
49 #include "src/core/lib/transport/static_metadata.h"
50
// Tracer enabled via GRPC_TRACE=server_channel; gates the disconnect
// logging in destroy_channel().
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");

// Forward declarations: call_data's constructor wires these callbacks into
// closures before their definitions appear further down this file.
static void server_on_recv_initial_metadata(void* ptr, grpc_error* error);
static void server_recv_trailing_metadata_ready(void* user_data,
                                                grpc_error* error);
56
namespace {
// A transport listener registered with a server; nodes form a singly
// linked list rooted at grpc_server::listeners.
struct listener {
  void* arg;
  // Called when the server starts, handing the listener the server's
  // pollsets.
  void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
                size_t pollset_count);
  // Called during shutdown; the implementation schedules |closure| when its
  // teardown completes.
  void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
  struct listener* next;
  intptr_t socket_uuid;
  grpc_closure destroy_done;
};

// The two flavors of application call-request APIs.
enum requested_call_type { BATCH_CALL, REGISTERED_CALL };

struct registered_method;

// One outstanding application request for an incoming call; queued on a
// request_matcher until a matching RPC arrives (see publish_call).
struct requested_call {
  gpr_mpscq_node request_link; /* must be first */
  requested_call_type type;
  size_t cq_idx;
  void* tag;
  grpc_server* server;
  grpc_completion_queue* cq_bound_to_call;
  grpc_call** call;
  grpc_cq_completion completion;
  grpc_metadata_array* initial_metadata;
  // Out-parameters specific to the request type (see requested_call_type).
  union {
    struct {
      grpc_call_details* details;
    } batch;
    struct {
      registered_method* method;
      gpr_timespec* deadline;
      grpc_byte_buffer** optional_payload;
    } registered;
  } data;
};

// Per-channel view of a registered method; entries live in the channel's
// open-addressed hash table (see start_new_rpc's probe loops).
struct channel_registered_method {
  registered_method* server_registered_method;
  uint32_t flags;
  bool has_host;
  grpc_slice method;
  grpc_slice host;
};

// Channel-level data for the server filter.
struct channel_data {
  grpc_server* server;
  grpc_connectivity_state connectivity_state;
  grpc_channel* channel;
  // Home completion-queue index; used to spread call matching across CQs.
  size_t cq_idx;
  /* linked list of all channels on a server */
  channel_data* next;
  channel_data* prev;
  // Hash table of registered methods for this channel (may be null).
  channel_registered_method* registered_methods;
  uint32_t registered_method_slots;
  uint32_t registered_method_max_probes;
  grpc_closure finish_destroy_channel_closure;
  grpc_closure channel_connectivity_changed;
  intptr_t channelz_socket_uuid;
};

// Bookkeeping for one tag handed to grpc_server_shutdown_and_notify;
// completed in maybe_finish_shutdown.
typedef struct shutdown_tag {
  void* tag;
  grpc_completion_queue* cq;
  grpc_cq_completion completion;
} shutdown_tag;

// Lifecycle states stored in call_data::state (as a gpr_atm).
typedef enum {
  /* waiting for metadata */
  NOT_STARTED,
  /* initial metadata read, not flow controlled in yet */
  PENDING,
  /* flow controlled in, on completion queue */
  ACTIVATED,
  /* cancelled before being queued */
  ZOMBIED
} call_state;

typedef struct request_matcher request_matcher;

// Call-level data for the server filter.
struct call_data {
  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
      : call(grpc_call_from_top_element(elem)),
        call_combiner(args.call_combiner) {
    GRPC_CLOSURE_INIT(&server_on_recv_initial_metadata,
                      ::server_on_recv_initial_metadata, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
                      server_recv_trailing_metadata_ready, elem,
                      grpc_schedule_on_exec_ctx);
  }
  ~call_data() {
    // A PENDING call is still linked on a matcher's pending list; it must
    // leave that state (e.g. via zombification) before destruction.
    GPR_ASSERT(state != PENDING);
    GRPC_ERROR_UNREF(recv_initial_metadata_error);
    if (host_set) {
      grpc_slice_unref_internal(host);
    }
    if (path_set) {
      grpc_slice_unref_internal(path);
    }
    grpc_metadata_array_destroy(&initial_metadata);
    grpc_byte_buffer_destroy(payload);
  }

  grpc_call* call;

  // Holds one of call_state; accessed with gpr_atm_* operations.
  gpr_atm state = NOT_STARTED;

  // path/host are only valid (and owned) when the matching *_set flag is
  // true; refs are released in the destructor.
  bool path_set = false;
  bool host_set = false;
  grpc_slice path;
  grpc_slice host;
  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;

  // CQ the call was published on (set in publish_call).
  grpc_completion_queue* cq_new = nullptr;

  grpc_metadata_batch* recv_initial_metadata = nullptr;
  uint32_t recv_initial_metadata_flags = 0;
  grpc_metadata_array initial_metadata =
      grpc_metadata_array();  // Zero-initialize the C struct.

  // Matcher this call was routed to (set in finish_start_new_rpc).
  request_matcher* matcher = nullptr;
  grpc_byte_buffer* payload = nullptr;

  grpc_closure got_initial_metadata;
  grpc_closure server_on_recv_initial_metadata;
  grpc_closure kill_zombie_closure;
  grpc_closure* on_done_recv_initial_metadata;
  grpc_closure recv_trailing_metadata_ready;
  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
  grpc_closure* original_recv_trailing_metadata_ready;
  grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
  bool seen_recv_trailing_metadata_ready = false;

  grpc_closure publish;

  // Intrusive link for request_matcher's pending list.
  call_data* pending_next = nullptr;
  grpc_core::CallCombiner* call_combiner;
};

// Matches incoming RPCs against queued requested_calls; there is one
// matcher per registered method plus one for unregistered methods.
struct request_matcher {
  grpc_server* server;
  // Calls that arrived before any request was available; guarded by the
  // server's mu_call (see publish_new_rpc).
  call_data* pending_head;
  call_data* pending_tail;
  // One lock-protected MPSC queue of requested_calls per completion queue.
  gpr_locked_mpscq* requests_per_cq;
};

// A method registered before server start; nodes form a singly linked
// list rooted at grpc_server::registered_methods.
struct registered_method {
  char* method;
  char* host;
  grpc_server_register_method_payload_handling payload_handling;
  uint32_t flags;
  /* one request matcher per method */
  request_matcher matcher;
  registered_method* next;
};

// Snapshot of all channels on a server, used to broadcast shutdown ops.
typedef struct {
  grpc_channel** channels;
  size_t num_channels;
} channel_broadcaster;
}  // namespace
219
struct grpc_server {
  grpc_channel_args* channel_args;

  // Default resource user for this server — usage not visible in this
  // chunk; confirm semantics against the resource-quota code.
  grpc_resource_user* default_resource_user;

  grpc_completion_queue** cqs;
  grpc_pollset** pollsets;
  size_t cq_count;
  size_t pollset_count;
  bool started;

  /* The two following mutexes control access to server-state
     mu_global controls access to non-call-related state (e.g., channel state)
     mu_call controls access to call-related state (e.g., the call lists)

     If they are ever required to be nested, you must lock mu_global
     before mu_call. This is currently used in shutdown processing
     (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
  gpr_mu mu_global; /* mutex for server and channel state */
  gpr_mu mu_call;   /* mutex for call-specific state */

  /* startup synchronization: flag is protected by mu_global, signals whether
     we are doing the listener start routine or not */
  bool starting;
  gpr_cv starting_cv;

  // Singly linked list of methods registered with the server.
  registered_method* registered_methods;
  /** one request matcher for unregistered methods */
  request_matcher unregistered_request_matcher;

  // Read with acquire semantics throughout this file (gpr_atm_acq_load).
  gpr_atm shutdown_flag;
  uint8_t shutdown_published;
  size_t num_shutdown_tags;
  shutdown_tag* shutdown_tags;

  // Sentinel node of the doubly linked ring of all server channels.
  channel_data root_channel_data;

  listener* listeners;
  int listeners_destroyed;
  // Internal refcount; server_delete runs when it drops to zero.
  gpr_refcount internal_refcount;

  /** when did we print the last shutdown progress message */
  gpr_timespec last_shutdown_message_time;

  grpc_core::RefCountedPtr<grpc_core::channelz::ServerNode> channelz_server;
};
266
// Fetch the owning grpc_server from a call element's channel data.
#define SERVER_FROM_CALL_ELEM(elem) \
  (((channel_data*)(elem)->channel_data)->server)

static void publish_new_rpc(void* calld, grpc_error* error);
// Completes |rc| with failure on CQ |cq_idx|; consumes |error|.
static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
                      grpc_error* error);
/* Before calling maybe_finish_shutdown, we must hold mu_global and not
   hold mu_call */
static void maybe_finish_shutdown(grpc_server* server);
276
277 /*
278 * channel broadcaster
279 */
280
281 /* assumes server locked */
channel_broadcaster_init(grpc_server * s,channel_broadcaster * cb)282 static void channel_broadcaster_init(grpc_server* s, channel_broadcaster* cb) {
283 channel_data* c;
284 size_t count = 0;
285 for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
286 count++;
287 }
288 cb->num_channels = count;
289 cb->channels = static_cast<grpc_channel**>(
290 gpr_malloc(sizeof(*cb->channels) * cb->num_channels));
291 count = 0;
292 for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
293 cb->channels[count++] = c->channel;
294 GRPC_CHANNEL_INTERNAL_REF(c->channel, "broadcast");
295 }
296 }
297
// Context for shutdown_cleanup: keeps the "Server shutdown" status slice
// alive until the transport op referencing it completes.
struct shutdown_cleanup_args {
  grpc_closure closure;
  grpc_slice slice;
};
302
shutdown_cleanup(void * arg,grpc_error * error)303 static void shutdown_cleanup(void* arg, grpc_error* error) {
304 struct shutdown_cleanup_args* a =
305 static_cast<struct shutdown_cleanup_args*>(arg);
306 grpc_slice_unref_internal(a->slice);
307 gpr_free(a);
308 }
309
send_shutdown(grpc_channel * channel,bool send_goaway,grpc_error * send_disconnect)310 static void send_shutdown(grpc_channel* channel, bool send_goaway,
311 grpc_error* send_disconnect) {
312 struct shutdown_cleanup_args* sc =
313 static_cast<struct shutdown_cleanup_args*>(gpr_malloc(sizeof(*sc)));
314 GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
315 grpc_schedule_on_exec_ctx);
316 grpc_transport_op* op = grpc_make_transport_op(&sc->closure);
317 grpc_channel_element* elem;
318
319 op->goaway_error =
320 send_goaway ? grpc_error_set_int(
321 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"),
322 GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
323 : GRPC_ERROR_NONE;
324 op->set_accept_stream = true;
325 sc->slice = grpc_slice_from_copied_string("Server shutdown");
326 op->disconnect_with_error = send_disconnect;
327
328 elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
329 elem->filter->start_transport_op(elem, op);
330 }
331
channel_broadcaster_shutdown(channel_broadcaster * cb,bool send_goaway,grpc_error * force_disconnect)332 static void channel_broadcaster_shutdown(channel_broadcaster* cb,
333 bool send_goaway,
334 grpc_error* force_disconnect) {
335 size_t i;
336
337 for (i = 0; i < cb->num_channels; i++) {
338 send_shutdown(cb->channels[i], send_goaway,
339 GRPC_ERROR_REF(force_disconnect));
340 GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast");
341 }
342 gpr_free(cb->channels);
343 GRPC_ERROR_UNREF(force_disconnect);
344 }
345
346 /*
347 * request_matcher
348 */
349
request_matcher_init(request_matcher * rm,grpc_server * server)350 static void request_matcher_init(request_matcher* rm, grpc_server* server) {
351 rm->server = server;
352 rm->pending_head = rm->pending_tail = nullptr;
353 rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
354 gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
355 for (size_t i = 0; i < server->cq_count; i++) {
356 gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
357 }
358 }
359
request_matcher_destroy(request_matcher * rm)360 static void request_matcher_destroy(request_matcher* rm) {
361 for (size_t i = 0; i < rm->server->cq_count; i++) {
362 GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr);
363 gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
364 }
365 gpr_free(rm->requests_per_cq);
366 }
367
kill_zombie(void * elem,grpc_error * error)368 static void kill_zombie(void* elem, grpc_error* error) {
369 grpc_call_unref(
370 grpc_call_from_top_element(static_cast<grpc_call_element*>(elem)));
371 }
372
request_matcher_zombify_all_pending_calls(request_matcher * rm)373 static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
374 while (rm->pending_head) {
375 call_data* calld = rm->pending_head;
376 rm->pending_head = calld->pending_next;
377 gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
378 GRPC_CLOSURE_INIT(
379 &calld->kill_zombie_closure, kill_zombie,
380 grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
381 grpc_schedule_on_exec_ctx);
382 GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
383 }
384 }
385
request_matcher_kill_requests(grpc_server * server,request_matcher * rm,grpc_error * error)386 static void request_matcher_kill_requests(grpc_server* server,
387 request_matcher* rm,
388 grpc_error* error) {
389 requested_call* rc;
390 for (size_t i = 0; i < server->cq_count; i++) {
391 while ((rc = reinterpret_cast<requested_call*>(
392 gpr_locked_mpscq_pop(&rm->requests_per_cq[i]))) != nullptr) {
393 fail_call(server, i, rc, GRPC_ERROR_REF(error));
394 }
395 }
396 GRPC_ERROR_UNREF(error);
397 }
398
399 /*
400 * server proper
401 */
402
// Takes an internal ref on the server; balanced by server_unref().
static void server_ref(grpc_server* server) {
  gpr_ref(&server->internal_refcount);
}
406
// Final destruction of the server; runs when the last internal ref drops
// (see server_unref). Frees registered methods, request matchers, CQ refs,
// and synchronization primitives. Matchers are only initialized once the
// server has started, hence the |started| guards.
static void server_delete(grpc_server* server) {
  registered_method* rm;
  size_t i;
  server->channelz_server.reset();
  grpc_channel_args_destroy(server->channel_args);
  gpr_mu_destroy(&server->mu_global);
  gpr_mu_destroy(&server->mu_call);
  gpr_cv_destroy(&server->starting_cv);
  while ((rm = server->registered_methods) != nullptr) {
    server->registered_methods = rm->next;
    if (server->started) {
      request_matcher_destroy(&rm->matcher);
    }
    gpr_free(rm->method);
    gpr_free(rm->host);
    gpr_free(rm);
  }
  if (server->started) {
    request_matcher_destroy(&server->unregistered_request_matcher);
  }
  for (i = 0; i < server->cq_count; i++) {
    GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
  }
  gpr_free(server->cqs);
  gpr_free(server->pollsets);
  gpr_free(server->shutdown_tags);
  gpr_free(server);
}
435
server_unref(grpc_server * server)436 static void server_unref(grpc_server* server) {
437 if (gpr_unref(&server->internal_refcount)) {
438 server_delete(server);
439 }
440 }
441
is_channel_orphaned(channel_data * chand)442 static int is_channel_orphaned(channel_data* chand) {
443 return chand->next == chand;
444 }
445
orphan_channel(channel_data * chand)446 static void orphan_channel(channel_data* chand) {
447 chand->next->prev = chand->prev;
448 chand->prev->next = chand->next;
449 chand->next = chand->prev = chand;
450 }
451
finish_destroy_channel(void * cd,grpc_error * error)452 static void finish_destroy_channel(void* cd, grpc_error* error) {
453 channel_data* chand = static_cast<channel_data*>(cd);
454 grpc_server* server = chand->server;
455 GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
456 server_unref(server);
457 }
458
// Begins teardown of a server channel: unlinks it from the channel ring and
// sends a transport op whose completion (finish_destroy_channel) releases
// the refs taken here. Consumes |error|, which is only used for trace
// logging. Caller holds mu_global (required by maybe_finish_shutdown).
static void destroy_channel(channel_data* chand, grpc_error* error) {
  if (is_channel_orphaned(chand)) return;  // teardown already in progress
  GPR_ASSERT(chand->server != nullptr);
  orphan_channel(chand);
  // Hold the server alive until finish_destroy_channel runs.
  server_ref(chand->server);
  maybe_finish_shutdown(chand->server);
  GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure,
                    finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_server_channel_trace) &&
      error != GRPC_ERROR_NONE) {
    const char* msg = grpc_error_string(error);
    gpr_log(GPR_INFO, "Disconnected client: %s", msg);
  }
  GRPC_ERROR_UNREF(error);

  // set_accept_stream with no handler installed — presumably this stops the
  // transport from creating new streams; confirm against transport docs.
  grpc_transport_op* op =
      grpc_make_transport_op(&chand->finish_destroy_channel_closure);
  op->set_accept_stream = true;
  grpc_channel_next_op(grpc_channel_stack_element(
                           grpc_channel_get_channel_stack(chand->channel), 0),
                       op);
}
482
// CQ completion callback: frees the requested_call once the application has
// consumed the completion posted by publish_call/fail_call.
static void done_request_event(void* req, grpc_cq_completion* c) {
  gpr_free(req);
}
486
// Hands a matched call to the application: binds the call to the
// requestor's bound CQ, fills the request's out-params (call handle,
// initial metadata, and per-type details/deadline/payload), and posts the
// completion on the server CQ at |cq_idx|. done_request_event frees |rc|
// once the completion is consumed.
static void publish_call(grpc_server* server, call_data* calld, size_t cq_idx,
                         requested_call* rc) {
  grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
  grpc_call* call = calld->call;
  *rc->call = call;
  calld->cq_new = server->cqs[cq_idx];
  // Transfer the received initial metadata into the application's array.
  GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
  switch (rc->type) {
    case BATCH_CALL:
      GPR_ASSERT(calld->host_set);
      GPR_ASSERT(calld->path_set);
      // The details struct gets its own refs on host/method slices.
      rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
      rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
      rc->data.batch.details->deadline =
          grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
      rc->data.batch.details->flags = calld->recv_initial_metadata_flags;
      break;
    case REGISTERED_CALL:
      *rc->data.registered.deadline =
          grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
      if (rc->data.registered.optional_payload) {
        // Ownership of the payload buffer moves to the application.
        *rc->data.registered.optional_payload = calld->payload;
        calld->payload = nullptr;
      }
      break;
    default:
      GPR_UNREACHABLE_CODE(return );
  }

  grpc_cq_end_op(calld->cq_new, rc->tag, GRPC_ERROR_NONE, done_request_event,
                 rc, &rc->completion, true);
}
519
// Matches a newly ready RPC against the application's queued requests.
// Fast path: probe each CQ's queue with the lockless try_pop. Slow path:
// re-probe with the blocking pop under mu_call and, if every queue is
// confirmed empty, park the call on the matcher's pending list. On error
// or server shutdown the call is zombified instead.
static void publish_new_rpc(void* arg, grpc_error* error) {
  grpc_call_element* call_elem = static_cast<grpc_call_element*>(arg);
  call_data* calld = static_cast<call_data*>(call_elem->call_data);
  channel_data* chand = static_cast<channel_data*>(call_elem->channel_data);
  request_matcher* rm = calld->matcher;
  grpc_server* server = rm->server;

  if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
    gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
    GRPC_CLOSURE_INIT(
        &calld->kill_zombie_closure, kill_zombie,
        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
        grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_REF(error));
    return;
  }

  // Fast path: start probing at this channel's home CQ to spread matching
  // across completion queues.
  for (size_t i = 0; i < server->cq_count; i++) {
    size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
    requested_call* rc = reinterpret_cast<requested_call*>(
        gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx]));
    if (rc == nullptr) {
      continue;
    } else {
      GRPC_STATS_INC_SERVER_CQS_CHECKED(i);
      gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
      publish_call(server, calld, cq_idx, rc);
      return; /* early out */
    }
  }

  /* no cq to take the request found: queue it on the slow list */
  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED();
  gpr_mu_lock(&server->mu_call);

  // We need to ensure that all the queues are empty. We do this under
  // the server mu_call lock to ensure that if something is added to
  // an empty request queue, it will block until the call is actually
  // added to the pending list.
  for (size_t i = 0; i < server->cq_count; i++) {
    size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
    requested_call* rc = reinterpret_cast<requested_call*>(
        gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
    if (rc == nullptr) {
      continue;
    } else {
      gpr_mu_unlock(&server->mu_call);
      GRPC_STATS_INC_SERVER_CQS_CHECKED(i + server->cq_count);
      gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
      publish_call(server, calld, cq_idx, rc);
      return; /* early out */
    }
  }

  // All queues confirmed empty: append to the matcher's pending list.
  gpr_atm_no_barrier_store(&calld->state, PENDING);
  if (rm->pending_head == nullptr) {
    rm->pending_tail = rm->pending_head = calld;
  } else {
    rm->pending_tail->pending_next = calld;
    rm->pending_tail = calld;
  }
  calld->pending_next = nullptr;
  gpr_mu_unlock(&server->mu_call);
}
584
// Final stage of routing a new RPC to request matcher |rm|. If the server
// is shutting down the call is zombified. Otherwise, depending on
// |payload_handling|, the RPC is published immediately or only after a
// RECV_MESSAGE batch fetches the payload the registered method expects.
static void finish_start_new_rpc(
    grpc_server* server, grpc_call_element* elem, request_matcher* rm,
    grpc_server_register_method_payload_handling payload_handling) {
  call_data* calld = static_cast<call_data*>(elem->call_data);

  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
    GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
    return;
  }

  calld->matcher = rm;

  switch (payload_handling) {
    case GRPC_SRM_PAYLOAD_NONE:
      publish_new_rpc(elem, GRPC_ERROR_NONE);
      break;
    case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
      // Read the first message into calld->payload; publish_new_rpc runs
      // when the batch completes.
      grpc_op op;
      op.op = GRPC_OP_RECV_MESSAGE;
      op.flags = 0;
      op.reserved = nullptr;
      op.data.recv_message.recv_message = &calld->payload;
      GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
                        grpc_schedule_on_exec_ctx);
      grpc_call_start_batch_and_execute(calld->call, &op, 1, &calld->publish);
      break;
    }
  }
}
617
// Resolves the received :path/:authority against the channel's
// registered-method hash table (open addressing, linear probing over at
// most registered_method_max_probes slots) and routes the call to the
// matching matcher. Falls back to the unregistered-method matcher when no
// registered method matches or the metadata is incomplete.
static void start_new_rpc(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_server* server = chand->server;
  uint32_t i;
  uint32_t hash;
  channel_registered_method* rm;

  if (chand->registered_methods && calld->path_set && calld->host_set) {
    /* TODO(ctiller): unify these two searches */
    /* check for an exact match with host */
    hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash_internal(calld->host),
                              grpc_slice_hash_internal(calld->path));
    for (i = 0; i <= chand->registered_method_max_probes; i++) {
      rm = &chand->registered_methods[(hash + i) %
                                      chand->registered_method_slots];
      // NOTE(review): rm is the address of an array slot and can never be
      // null, so this break looks unreachable — confirm intent upstream.
      if (!rm) break;
      if (!rm->has_host) continue;
      if (!grpc_slice_eq(rm->host, calld->host)) continue;
      if (!grpc_slice_eq(rm->method, calld->path)) continue;
      // Methods registered with the idempotent flag only match requests
      // that also carry it.
      if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
          0 == (calld->recv_initial_metadata_flags &
                GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
        continue;
      }
      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                           rm->server_registered_method->payload_handling);
      return;
    }
    /* check for a wildcard method definition (no host set) */
    hash = GRPC_MDSTR_KV_HASH(0, grpc_slice_hash_internal(calld->path));
    for (i = 0; i <= chand->registered_method_max_probes; i++) {
      rm = &chand->registered_methods[(hash + i) %
                                      chand->registered_method_slots];
      if (!rm) break;
      if (rm->has_host) continue;
      if (!grpc_slice_eq(rm->method, calld->path)) continue;
      if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
          0 == (calld->recv_initial_metadata_flags &
                GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
        continue;
      }
      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                           rm->server_registered_method->payload_handling);
      return;
    }
  }
  // No registered method matched (or metadata was incomplete): catch-all.
  finish_start_new_rpc(server, elem, &server->unregistered_request_matcher,
                       GRPC_SRM_PAYLOAD_NONE);
}
668
num_listeners(grpc_server * server)669 static int num_listeners(grpc_server* server) {
670 listener* l;
671 int n = 0;
672 for (l = server->listeners; l; l = l->next) {
673 n++;
674 }
675 return n;
676 }
677
done_shutdown_event(void * server,grpc_cq_completion * completion)678 static void done_shutdown_event(void* server, grpc_cq_completion* completion) {
679 server_unref(static_cast<grpc_server*>(server));
680 }
681
num_channels(grpc_server * server)682 static int num_channels(grpc_server* server) {
683 channel_data* chand;
684 int n = 0;
685 for (chand = server->root_channel_data.next;
686 chand != &server->root_channel_data; chand = chand->next) {
687 n++;
688 }
689 return n;
690 }
691
kill_pending_work_locked(grpc_server * server,grpc_error * error)692 static void kill_pending_work_locked(grpc_server* server, grpc_error* error) {
693 if (server->started) {
694 request_matcher_kill_requests(server, &server->unregistered_request_matcher,
695 GRPC_ERROR_REF(error));
696 request_matcher_zombify_all_pending_calls(
697 &server->unregistered_request_matcher);
698 for (registered_method* rm = server->registered_methods; rm;
699 rm = rm->next) {
700 request_matcher_kill_requests(server, &rm->matcher,
701 GRPC_ERROR_REF(error));
702 request_matcher_zombify_all_pending_calls(&rm->matcher);
703 }
704 }
705 GRPC_ERROR_UNREF(error);
706 }
707
// If shutdown has been requested and not yet published, kills all pending
// work and — once every channel and listener is gone — posts a completion
// for each registered shutdown tag. While still waiting, logs a progress
// message at most once per second. Caller must hold mu_global and must NOT
// hold mu_call (see the declaration comment near the top of the file).
static void maybe_finish_shutdown(grpc_server* server) {
  size_t i;
  if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
    return;
  }

  kill_pending_work_locked(
      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));

  if (server->root_channel_data.next != &server->root_channel_data ||
      server->listeners_destroyed < num_listeners(server)) {
    // Not done yet: rate-limit the progress log to once per second.
    if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
                                  server->last_shutdown_message_time),
                     gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
      gpr_log(GPR_DEBUG,
              "Waiting for %d channels and %d/%d listeners to be destroyed"
              " before shutting down server",
              num_channels(server),
              num_listeners(server) - server->listeners_destroyed,
              num_listeners(server));
    }
    return;
  }
  server->shutdown_published = 1;
  for (i = 0; i < server->num_shutdown_tags; i++) {
    // One server ref per tag; released by done_shutdown_event.
    server_ref(server);
    grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
                   GRPC_ERROR_NONE, done_shutdown_event, server,
                   &server->shutdown_tags[i].completion);
  }
}
740
// Interposed recv_initial_metadata callback for the server filter. On
// success, steals :path and :authority out of the batch into calld (taking
// slice refs) and picks up any deadline. If either header is missing, the
// error is wrapped as "Missing :authority or :path" and a ref is stashed in
// calld->recv_initial_metadata_error for the trailing-metadata path. Then
// any deferred trailing-metadata callback is resumed and the application's
// original callback runs.
static void server_on_recv_initial_metadata(void* ptr, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(ptr);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_millis op_deadline;

  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != nullptr);
    GPR_ASSERT(calld->recv_initial_metadata->idx.named.authority != nullptr);
    calld->path = grpc_slice_ref_internal(
        GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
    calld->host = grpc_slice_ref_internal(
        GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md));
    calld->path_set = true;
    calld->host_set = true;
    // The server consumes these headers; hide them from the batch the
    // application will see.
    grpc_metadata_batch_remove(calld->recv_initial_metadata,
                               calld->recv_initial_metadata->idx.named.path);
    grpc_metadata_batch_remove(
        calld->recv_initial_metadata,
        calld->recv_initial_metadata->idx.named.authority);
  } else {
    // Take our own ref: the incoming |error| is owned by the caller.
    GRPC_ERROR_REF(error);
  }
  op_deadline = calld->recv_initial_metadata->deadline;
  if (op_deadline != GRPC_MILLIS_INF_FUTURE) {
    calld->deadline = op_deadline;
  }
  if (calld->host_set && calld->path_set) {
    /* do nothing */
  } else {
    /* Pass the error reference to calld->recv_initial_metadata_error */
    grpc_error* src_error = error;
    error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
        "Missing :authority or :path", &src_error, 1);
    GRPC_ERROR_UNREF(src_error);
    calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
  }
  grpc_closure* closure = calld->on_done_recv_initial_metadata;
  // Clearing this field signals server_recv_trailing_metadata_ready that
  // initial metadata processing has finished.
  calld->on_done_recv_initial_metadata = nullptr;
  if (calld->seen_recv_trailing_metadata_ready) {
    GRPC_CALL_COMBINER_START(calld->call_combiner,
                             &calld->recv_trailing_metadata_ready,
                             calld->recv_trailing_metadata_error,
                             "continue server_recv_trailing_metadata_ready");
  }
  GRPC_CLOSURE_RUN(closure, error);
}
787
// Interposed recv_trailing_metadata callback. If initial metadata has not
// completed yet, stashes the error and defers itself (re-armed via the
// call combiner from server_on_recv_initial_metadata). Otherwise forwards
// to the original callback with any initial-metadata error attached as a
// child error.
static void server_recv_trailing_metadata_ready(void* user_data,
                                                grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (calld->on_done_recv_initial_metadata != nullptr) {
    // Initial metadata still in flight: park until it completes.
    calld->recv_trailing_metadata_error = GRPC_ERROR_REF(error);
    calld->seen_recv_trailing_metadata_ready = true;
    GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
                      server_recv_trailing_metadata_ready, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "deferring server_recv_trailing_metadata_ready "
                            "until after server_on_recv_initial_metadata");
    return;
  }
  error =
      grpc_error_add_child(GRPC_ERROR_REF(error),
                           GRPC_ERROR_REF(calld->recv_initial_metadata_error));
  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
}
808
server_mutate_op(grpc_call_element * elem,grpc_transport_stream_op_batch * op)809 static void server_mutate_op(grpc_call_element* elem,
810 grpc_transport_stream_op_batch* op) {
811 call_data* calld = static_cast<call_data*>(elem->call_data);
812
813 if (op->recv_initial_metadata) {
814 GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == nullptr);
815 calld->recv_initial_metadata =
816 op->payload->recv_initial_metadata.recv_initial_metadata;
817 calld->on_done_recv_initial_metadata =
818 op->payload->recv_initial_metadata.recv_initial_metadata_ready;
819 op->payload->recv_initial_metadata.recv_initial_metadata_ready =
820 &calld->server_on_recv_initial_metadata;
821 op->payload->recv_initial_metadata.recv_flags =
822 &calld->recv_initial_metadata_flags;
823 }
824 if (op->recv_trailing_metadata) {
825 calld->original_recv_trailing_metadata_ready =
826 op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
827 op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
828 &calld->recv_trailing_metadata_ready;
829 }
830 }
831
server_start_transport_stream_op_batch(grpc_call_element * elem,grpc_transport_stream_op_batch * op)832 static void server_start_transport_stream_op_batch(
833 grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
834 server_mutate_op(elem, op);
835 grpc_call_next_op(elem, op);
836 }
837
// Completion of the RECV_INITIAL_METADATA batch issued in accept_stream.
// On success, start method matching. On failure, zombify the call: the
// NOT_STARTED -> ZOMBIED transition schedules destruction immediately,
// while PENDING -> ZOMBIED leaves cleanup to the pending-list sweep.
static void got_initial_metadata(void* ptr, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(ptr);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error == GRPC_ERROR_NONE) {
    start_new_rpc(elem);
  } else {
    if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
      GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                        grpc_schedule_on_exec_ctx);
      GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
    } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
      /* zombied call will be destroyed when it's removed from the pending
         queue... later */
    }
  }
}
854
// Transport callback invoked for each new incoming stream: creates a
// server-side call and starts a RECV_INITIAL_METADATA batch whose
// completion (got_initial_metadata) kicks off method matching.
static void accept_stream(void* cd, grpc_transport* transport,
                          const void* transport_server_data) {
  channel_data* chand = static_cast<channel_data*>(cd);
  /* create a call */
  grpc_call_create_args args;
  args.channel = chand->channel;
  args.server = chand->server;
  args.parent = nullptr;
  args.propagation_mask = 0;
  args.cq = nullptr;
  args.pollset_set_alternative = nullptr;
  args.server_transport_data = transport_server_data;
  args.add_initial_metadata = nullptr;
  args.add_initial_metadata_count = 0;
  args.send_deadline = GRPC_MILLIS_INF_FUTURE;
  grpc_call* call;
  grpc_error* error = grpc_call_create(&args, &call);
  grpc_call_element* elem =
      grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
  if (error != GRPC_ERROR_NONE) {
    // Creation failed: route through got_initial_metadata's error path so
    // the call is zombified and released.
    got_initial_metadata(elem, error);
    GRPC_ERROR_UNREF(error);
    return;
  }
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_op op;
  op.op = GRPC_OP_RECV_INITIAL_METADATA;
  op.flags = 0;
  op.reserved = nullptr;
  op.data.recv_initial_metadata.recv_initial_metadata =
      &calld->initial_metadata;
  GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
                    grpc_schedule_on_exec_ctx);
  grpc_call_start_batch_and_execute(call, &op, 1, &calld->got_initial_metadata);
}
890
/* Connectivity watcher for a server channel. While the channel has not
   reached SHUTDOWN, re-arms the watch by issuing another transport op; once
   it reaches SHUTDOWN, destroys the channel under the server's global lock
   and drops the "connectivity" ref taken in grpc_server_setup_transport. */
static void channel_connectivity_changed(void* cd, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(cd);
  grpc_server* server = chand->server;
  if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
    grpc_transport_op* op = grpc_make_transport_op(nullptr);
    op->on_connectivity_state_change = &chand->channel_connectivity_changed;
    op->connectivity_state = &chand->connectivity_state;
    grpc_channel_next_op(grpc_channel_stack_element(
                             grpc_channel_get_channel_stack(chand->channel), 0),
                         op);
  } else {
    gpr_mu_lock(&server->mu_global);
    destroy_channel(chand, GRPC_ERROR_REF(error));
    gpr_mu_unlock(&server->mu_global);
    GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity");
  }
}
908
/* Per-call constructor for the server filter: takes a server ref (released
   in destroy_call_elem) and placement-constructs call_data into the storage
   provided by the call stack. */
static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  server_ref(chand->server);
  new (elem->call_data) call_data(elem, *args);
  return GRPC_ERROR_NONE;
}
916
destroy_call_elem(grpc_call_element * elem,const grpc_call_final_info * final_info,grpc_closure * ignored)917 static void destroy_call_elem(grpc_call_element* elem,
918 const grpc_call_final_info* final_info,
919 grpc_closure* ignored) {
920 call_data* calld = static_cast<call_data*>(elem->call_data);
921 calld->~call_data();
922 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
923 server_unref(chand->server);
924 }
925
init_channel_elem(grpc_channel_element * elem,grpc_channel_element_args * args)926 static grpc_error* init_channel_elem(grpc_channel_element* elem,
927 grpc_channel_element_args* args) {
928 channel_data* chand = static_cast<channel_data*>(elem->channel_data);
929 GPR_ASSERT(args->is_first);
930 GPR_ASSERT(!args->is_last);
931 chand->server = nullptr;
932 chand->channel = nullptr;
933 chand->next = chand->prev = chand;
934 chand->registered_methods = nullptr;
935 chand->connectivity_state = GRPC_CHANNEL_IDLE;
936 GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
937 channel_connectivity_changed, chand,
938 grpc_schedule_on_exec_ctx);
939 return GRPC_ERROR_NONE;
940 }
941
/* Per-channel destructor: releases the slices held by the registered-method
   lookup table and, if the channel was attached to a server, removes its
   channelz socket, unlinks it from the server's channel list under
   mu_global, re-checks shutdown progress, and drops the server ref. */
static void destroy_channel_elem(grpc_channel_element* elem) {
  size_t i;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->registered_methods) {
    for (i = 0; i < chand->registered_method_slots; i++) {
      grpc_slice_unref_internal(chand->registered_methods[i].method);
      if (chand->registered_methods[i].has_host) {
        grpc_slice_unref_internal(chand->registered_methods[i].host);
      }
    }
    gpr_free(chand->registered_methods);
  }
  if (chand->server) {
    if (chand->server->channelz_server != nullptr &&
        chand->channelz_socket_uuid != 0) {
      chand->server->channelz_server->RemoveChildSocket(
          chand->channelz_socket_uuid);
    }
    gpr_mu_lock(&chand->server->mu_global);
    /* unlink from the doubly-linked channel list; leave the node
       self-linked */
    chand->next->prev = chand->prev;
    chand->prev->next = chand->next;
    chand->next = chand->prev = chand;
    maybe_finish_shutdown(chand->server);
    gpr_mu_unlock(&chand->server->mu_global);
    server_unref(chand->server);
  }
}
969
/* vtable for the server's top-of-stack channel filter ("server"): routes
   stream op batches through server_start_transport_stream_op_batch and
   manages call_data / channel_data lifecycle via the init/destroy hooks
   defined above. */
const grpc_channel_filter grpc_server_top_filter = {
    server_start_transport_stream_op_batch,
    grpc_channel_next_op,
    sizeof(call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    sizeof(channel_data),
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info,
    "server",
};
983
register_completion_queue(grpc_server * server,grpc_completion_queue * cq,void * reserved)984 static void register_completion_queue(grpc_server* server,
985 grpc_completion_queue* cq,
986 void* reserved) {
987 size_t i, n;
988 GPR_ASSERT(!reserved);
989 for (i = 0; i < server->cq_count; i++) {
990 if (server->cqs[i] == cq) return;
991 }
992
993 GRPC_CQ_INTERNAL_REF(cq, "server");
994 n = server->cq_count++;
995 server->cqs = static_cast<grpc_completion_queue**>(gpr_realloc(
996 server->cqs, server->cq_count * sizeof(grpc_completion_queue*)));
997 server->cqs[n] = cq;
998 }
999
grpc_server_register_completion_queue(grpc_server * server,grpc_completion_queue * cq,void * reserved)1000 void grpc_server_register_completion_queue(grpc_server* server,
1001 grpc_completion_queue* cq,
1002 void* reserved) {
1003 GRPC_API_TRACE(
1004 "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
1005 (server, cq, reserved));
1006
1007 auto cq_type = grpc_get_cq_completion_type(cq);
1008 if (cq_type != GRPC_CQ_NEXT && cq_type != GRPC_CQ_CALLBACK) {
1009 gpr_log(GPR_INFO,
1010 "Completion queue of type %d is being registered as a "
1011 "server-completion-queue",
1012 static_cast<int>(cq_type));
1013 /* Ideally we should log an error and abort but ruby-wrapped-language API
1014 calls grpc_completion_queue_pluck() on server completion queues */
1015 }
1016
1017 register_completion_queue(server, cq, reserved);
1018 }
1019
/* Public API: allocates and initializes a new (unstarted) server.
   - starts with one internal ref, decremented by grpc_server_destroy
   - copies |args| into server->channel_args
   - creates a channelz ServerNode when GRPC_ARG_ENABLE_CHANNELZ is enabled
     (default controlled by GRPC_ENABLE_CHANNELZ_DEFAULT)
   - picks up a default resource user from a resource quota already present
     in |args| (does not create a quota itself). */
grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));

  grpc_server* server =
      static_cast<grpc_server*>(gpr_zalloc(sizeof(grpc_server)));

  gpr_mu_init(&server->mu_global);
  gpr_mu_init(&server->mu_call);
  gpr_cv_init(&server->starting_cv);

  /* decremented by grpc_server_destroy */
  gpr_ref_init(&server->internal_refcount, 1);
  /* empty circular channel list: root node links to itself */
  server->root_channel_data.next = server->root_channel_data.prev =
      &server->root_channel_data;

  server->channel_args = grpc_channel_args_copy(args);

  const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_ENABLE_CHANNELZ);
  if (grpc_channel_arg_get_bool(arg, GRPC_ENABLE_CHANNELZ_DEFAULT)) {
    arg = grpc_channel_args_find(
        args, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE);
    size_t channel_tracer_max_memory = grpc_channel_arg_get_integer(
        arg,
        {GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX});
    server->channelz_server =
        grpc_core::MakeRefCounted<grpc_core::channelz::ServerNode>(
            server, channel_tracer_max_memory);
    server->channelz_server->AddTraceEvent(
        grpc_core::channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Server created"));
  }

  if (args != nullptr) {
    grpc_resource_quota* resource_quota =
        grpc_resource_quota_from_channel_args(args, false /* create */);
    if (resource_quota != nullptr) {
      server->default_resource_user =
          grpc_resource_user_create(resource_quota, "default");
    }
  }

  return server;
}
1064
/* NULL-tolerant string equality: two NULLs compare equal; NULL never equals
   a non-NULL string. Returns 1 on equality, 0 otherwise. */
static int streq(const char* a, const char* b) {
  if (a == NULL || b == NULL) return a == b;
  return strcmp(a, b) == 0;
}
1071
/* Public API: registers a (method, host) pair so matching incoming calls
   can later be requested via grpc_server_request_registered_call. Returns
   an opaque handle (the registered_method*) used for those requests, or
   nullptr when |method| is null, the pair is already registered, or |flags|
   has bits outside GRPC_INITIAL_METADATA_USED_MASK. A null |host| acts as a
   wildcard (logged as "*"). */
void* grpc_server_register_method(
    grpc_server* server, const char* method, const char* host,
    grpc_server_register_method_payload_handling payload_handling,
    uint32_t flags) {
  registered_method* m;
  GRPC_API_TRACE(
      "grpc_server_register_method(server=%p, method=%s, host=%s, "
      "flags=0x%08x)",
      4, (server, method, host, flags));
  if (!method) {
    gpr_log(GPR_ERROR,
            "grpc_server_register_method method string cannot be NULL");
    return nullptr;
  }
  for (m = server->registered_methods; m; m = m->next) {
    if (streq(m->method, method) && streq(m->host, host)) {
      gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method,
              host ? host : "*");
      return nullptr;
    }
  }
  if ((flags & ~GRPC_INITIAL_METADATA_USED_MASK) != 0) {
    gpr_log(GPR_ERROR, "grpc_server_register_method invalid flags 0x%08x",
            flags);
    return nullptr;
  }
  /* prepend the new registration onto the singly-linked method list */
  m = static_cast<registered_method*>(gpr_zalloc(sizeof(registered_method)));
  m->method = gpr_strdup(method);
  m->host = gpr_strdup(host);
  m->next = server->registered_methods;
  m->payload_handling = payload_handling;
  m->flags = flags;
  server->registered_methods = m;
  return m;
}
1107
/* Public API: starts the server. Snapshots the pollsets of every
   listen-capable completion queue, initializes the request matchers (one
   for unregistered calls plus one per registered method), then starts each
   listener. The starting flag / starting_cv handshake lets
   grpc_server_shutdown_and_notify wait for startup to finish. */
void grpc_server_start(grpc_server* server) {
  size_t i;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));

  server->started = true;
  server->pollset_count = 0;
  /* collect pollsets only from cqs that can actually listen */
  server->pollsets = static_cast<grpc_pollset**>(
      gpr_malloc(sizeof(grpc_pollset*) * server->cq_count));
  for (i = 0; i < server->cq_count; i++) {
    if (grpc_cq_can_listen(server->cqs[i])) {
      server->pollsets[server->pollset_count++] =
          grpc_cq_pollset(server->cqs[i]);
    }
  }
  request_matcher_init(&server->unregistered_request_matcher, server);
  for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
    request_matcher_init(&rm->matcher, server);
  }

  gpr_mu_lock(&server->mu_global);
  server->starting = true;
  gpr_mu_unlock(&server->mu_global);

  for (listener* l = server->listeners; l; l = l->next) {
    l->start(server, l->arg, server->pollsets, server->pollset_count);
  }

  /* signal anyone blocked in grpc_server_shutdown_and_notify waiting for
     startup to complete */
  gpr_mu_lock(&server->mu_global);
  server->starting = false;
  gpr_cv_signal(&server->starting_cv);
  gpr_mu_unlock(&server->mu_global);
}
1142
grpc_server_get_pollsets(grpc_server * server,grpc_pollset *** pollsets,size_t * pollset_count)1143 void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
1144 size_t* pollset_count) {
1145 *pollset_count = server->pollset_count;
1146 *pollsets = server->pollsets;
1147 }
1148
/* Attaches a freshly-accepted transport to server |s|:
   - creates a server channel wrapping |transport|
   - links the channel's channel_data to the server (taking a server ref)
     and records its channelz socket uuid, if any
   - selects the cq whose pollset matches |accepting_pollset|, falling back
     to a random registered cq
   - builds an open-addressed hash table (linear probing, 2x method count)
     of the server's registered methods keyed on (host, method) hashes
   - links the channel into the server's channel list and kicks the
     transport with accept_stream + a connectivity watch, disconnecting
     immediately if the server is already shutting down. */
void grpc_server_setup_transport(
    grpc_server* s, grpc_transport* transport, grpc_pollset* accepting_pollset,
    const grpc_channel_args* args,
    const grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>&
        socket_node,
    grpc_resource_user* resource_user) {
  size_t num_registered_methods;
  size_t alloc;
  registered_method* rm;
  channel_registered_method* crm;
  grpc_channel* channel;
  channel_data* chand;
  uint32_t hash;
  size_t slots;
  uint32_t probes;
  uint32_t max_probes = 0;
  grpc_transport_op* op = nullptr;

  channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport,
                                resource_user);
  chand = static_cast<channel_data*>(
      grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0)
          ->channel_data);
  chand->server = s;
  server_ref(s);
  chand->channel = channel;
  if (socket_node != nullptr) {
    chand->channelz_socket_uuid = socket_node->uuid();
    s->channelz_server->AddChildSocket(socket_node);
  } else {
    chand->channelz_socket_uuid = 0;
  }

  size_t cq_idx;
  for (cq_idx = 0; cq_idx < s->cq_count; cq_idx++) {
    if (grpc_cq_pollset(s->cqs[cq_idx]) == accepting_pollset) break;
  }
  if (cq_idx == s->cq_count) {
    /* completion queue not found: pick a random one to publish new calls to */
    cq_idx = static_cast<size_t>(rand()) % s->cq_count;
  }
  chand->cq_idx = cq_idx;

  num_registered_methods = 0;
  for (rm = s->registered_methods; rm; rm = rm->next) {
    num_registered_methods++;
  }
  /* build a lookup table phrased in terms of mdstr's in this channels context
     to quickly find registered methods */
  if (num_registered_methods > 0) {
    slots = 2 * num_registered_methods;
    alloc = sizeof(channel_registered_method) * slots;
    chand->registered_methods =
        static_cast<channel_registered_method*>(gpr_zalloc(alloc));
    for (rm = s->registered_methods; rm; rm = rm->next) {
      grpc_slice host;
      bool has_host;
      grpc_slice method;
      if (rm->host != nullptr) {
        host = grpc_slice_from_static_string(rm->host);
        has_host = true;
      } else {
        has_host = false;
      }
      method = grpc_slice_from_static_string(rm->method);
      hash = GRPC_MDSTR_KV_HASH(has_host ? grpc_slice_hash_internal(host) : 0,
                                grpc_slice_hash_internal(method));
      /* linear probing: find the first empty slot from the hash position */
      for (probes = 0; chand->registered_methods[(hash + probes) % slots]
                           .server_registered_method != nullptr;
           probes++)
        ;
      if (probes > max_probes) max_probes = probes;
      crm = &chand->registered_methods[(hash + probes) % slots];
      crm->server_registered_method = rm;
      crm->flags = rm->flags;
      crm->has_host = has_host;
      if (has_host) {
        crm->host = host;
      }
      crm->method = method;
    }
    GPR_ASSERT(slots <= UINT32_MAX);
    chand->registered_method_slots = static_cast<uint32_t>(slots);
    chand->registered_method_max_probes = max_probes;
  }

  /* splice the channel into the server's doubly-linked channel list */
  gpr_mu_lock(&s->mu_global);
  chand->next = &s->root_channel_data;
  chand->prev = chand->next->prev;
  chand->next->prev = chand->prev->next = chand;
  gpr_mu_unlock(&s->mu_global);

  /* ref released by channel_connectivity_changed once SHUTDOWN is reached */
  GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity");
  op = grpc_make_transport_op(nullptr);
  op->set_accept_stream = true;
  op->set_accept_stream_fn = accept_stream;
  op->set_accept_stream_user_data = chand;
  op->on_connectivity_state_change = &chand->channel_connectivity_changed;
  op->connectivity_state = &chand->connectivity_state;
  if (gpr_atm_acq_load(&s->shutdown_flag) != 0) {
    op->disconnect_with_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown");
  }
  grpc_transport_perform_op(transport, op);
}
1254
grpc_server_populate_listen_sockets(grpc_server * server,grpc_core::channelz::ChildRefsList * listen_sockets)1255 void grpc_server_populate_listen_sockets(
1256 grpc_server* server, grpc_core::channelz::ChildRefsList* listen_sockets) {
1257 gpr_mu_lock(&server->mu_global);
1258 for (listener* l = server->listeners; l != nullptr; l = l->next) {
1259 listen_sockets->push_back(l->socket_uuid);
1260 }
1261 gpr_mu_unlock(&server->mu_global);
1262 }
1263
/* Completion callback used when a shutdown notification is posted to a cq
   for an already-shut-down server: frees the heap-allocated completion
   storage. |done_arg| is unused. */
void done_published_shutdown(void* done_arg, grpc_cq_completion* storage) {
  (void)done_arg;
  gpr_free(storage);
}
1268
/* Closure run when a listener finishes destroying itself during shutdown:
   bumps the destroyed-listener count and re-checks whether overall server
   shutdown can now complete. */
static void listener_destroy_done(void* s, grpc_error* error) {
  grpc_server* server = static_cast<grpc_server*>(s);
  gpr_mu_lock(&server->mu_global);
  server->listeners_destroyed++;
  maybe_finish_shutdown(server);
  gpr_mu_unlock(&server->mu_global);
}
1276
1277 /*
1278 - Kills all pending requests-for-incoming-RPC-calls (i.e the requests made via
1279 grpc_server_request_call and grpc_server_request_registered call will now be
1280 cancelled). See 'kill_pending_work_locked()'
1281
1282 - Shuts down the listeners (i.e the server will no longer listen on the port
1283 for new incoming channels).
1284
1285 - Iterates through all channels on the server and sends shutdown msg (see
1286 'channel_broadcaster_shutdown()' for details) to the clients via the
1287 transport layer. The transport layer then guarantees the following:
1288 -- Sends shutdown to the client (for eg: HTTP2 transport sends GOAWAY)
   -- If the server has outstanding calls that are in progress, the
      connection is NOT closed until the server is done with all those calls
   -- Once there are no more calls in progress, the channel is closed
1292 */
void grpc_server_shutdown_and_notify(grpc_server* server,
                                     grpc_completion_queue* cq, void* tag) {
  listener* l;
  shutdown_tag* sdt;
  channel_broadcaster broadcaster;
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
                 (server, cq, tag));

  /* wait for startup to be finished: locks mu_global */
  gpr_mu_lock(&server->mu_global);
  while (server->starting) {
    gpr_cv_wait(&server->starting_cv, &server->mu_global,
                gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }

  /* stay locked, and gather up some stuff to do */
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  if (server->shutdown_published) {
    /* shutdown already completed earlier: notify this tag immediately */
    grpc_cq_end_op(cq, tag, GRPC_ERROR_NONE, done_published_shutdown, nullptr,
                   static_cast<grpc_cq_completion*>(
                       gpr_malloc(sizeof(grpc_cq_completion))));
    gpr_mu_unlock(&server->mu_global);
    return;
  }
  /* record (cq, tag) to be signalled when shutdown completes */
  server->shutdown_tags = static_cast<shutdown_tag*>(
      gpr_realloc(server->shutdown_tags,
                  sizeof(shutdown_tag) * (server->num_shutdown_tags + 1)));
  sdt = &server->shutdown_tags[server->num_shutdown_tags++];
  sdt->tag = tag;
  sdt->cq = cq;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    /* shutdown already in progress: the tag recorded above will be notified
       when it finishes */
    gpr_mu_unlock(&server->mu_global);
    return;
  }

  server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);

  channel_broadcaster_init(server, &broadcaster);

  gpr_atm_rel_store(&server->shutdown_flag, 1);

  /* collect all unregistered then registered calls */
  gpr_mu_lock(&server->mu_call);
  kill_pending_work_locked(
      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
  gpr_mu_unlock(&server->mu_call);

  maybe_finish_shutdown(server);
  gpr_mu_unlock(&server->mu_global);

  /* Shutdown listeners */
  for (l = server->listeners; l; l = l->next) {
    GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server,
                      grpc_schedule_on_exec_ctx);
    l->destroy(server, l->arg, &l->destroy_done);
  }

  channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */,
                               GRPC_ERROR_NONE);

  if (server->default_resource_user != nullptr) {
    grpc_resource_quota_unref(
        grpc_resource_user_quota(server->default_resource_user));
    grpc_resource_user_shutdown(server->default_resource_user);
    grpc_resource_user_unref(server->default_resource_user);
  }
}
1363
/* Public API: cancels all in-progress calls by broadcasting a shutdown
   (without GOAWAY) to every server channel. */
void grpc_server_cancel_all_calls(grpc_server* server) {
  channel_broadcaster broadcaster;
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));

  /* snapshot the channel list under the global lock, then shut the snapshot
     down outside it */
  gpr_mu_lock(&server->mu_global);
  channel_broadcaster_init(server, &broadcaster);
  gpr_mu_unlock(&server->mu_global);

  channel_broadcaster_shutdown(
      &broadcaster, false /* send_goaway */,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls"));
}
1379
/* Public API: releases the application's reference to the server. Asserts
   that shutdown was initiated (or the server never had listeners) and that
   every listener finished destroying itself, frees the listener records,
   and drops the initial internal ref taken in grpc_server_create. */
void grpc_server_destroy(grpc_server* server) {
  listener* l;
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));

  gpr_mu_lock(&server->mu_global);
  GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
  GPR_ASSERT(server->listeners_destroyed == num_listeners(server));

  while (server->listeners) {
    l = server->listeners;
    server->listeners = l->next;
    gpr_free(l);
  }

  gpr_mu_unlock(&server->mu_global);

  server_unref(server);
}
1401
grpc_server_add_listener(grpc_server * server,void * arg,void (* start)(grpc_server * server,void * arg,grpc_pollset ** pollsets,size_t pollset_count),void (* destroy)(grpc_server * server,void * arg,grpc_closure * on_done),intptr_t socket_uuid)1402 void grpc_server_add_listener(grpc_server* server, void* arg,
1403 void (*start)(grpc_server* server, void* arg,
1404 grpc_pollset** pollsets,
1405 size_t pollset_count),
1406 void (*destroy)(grpc_server* server, void* arg,
1407 grpc_closure* on_done),
1408 intptr_t socket_uuid) {
1409 listener* l = static_cast<listener*>(gpr_malloc(sizeof(listener)));
1410 l->arg = arg;
1411 l->start = start;
1412 l->destroy = destroy;
1413 l->socket_uuid = socket_uuid;
1414 l->next = server->listeners;
1415 server->listeners = l;
1416 }
1417
/* Queues a requested call (from grpc_server_request_call /
   grpc_server_request_registered_call) onto the per-cq request queue of the
   appropriate request matcher. If this request was the first queued, drains
   the matcher's pending-call list under mu_call, pairing pending incoming
   calls with queued requests; a call that fails the PENDING -> ACTIVATED
   transition is zombied instead of published. Requests arriving after
   shutdown are failed immediately (still returning GRPC_CALL_OK). */
static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
                                          requested_call* rc) {
  call_data* calld = nullptr;
  request_matcher* rm = nullptr;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    fail_call(server, cq_idx, rc,
              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
    return GRPC_CALL_OK;
  }
  switch (rc->type) {
    case BATCH_CALL:
      rm = &server->unregistered_request_matcher;
      break;
    case REGISTERED_CALL:
      rm = &rc->data.registered.method->matcher;
      break;
  }
  if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
    /* this was the first queued request: we need to lock and start
       matching calls */
    gpr_mu_lock(&server->mu_call);
    while ((calld = rm->pending_head) != nullptr) {
      rc = reinterpret_cast<requested_call*>(
          gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
      if (rc == nullptr) break;
      rm->pending_head = calld->pending_next;
      /* release mu_call while publishing/zombifying this call */
      gpr_mu_unlock(&server->mu_call);
      if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
        // Zombied Call
        GRPC_CLOSURE_INIT(
            &calld->kill_zombie_closure, kill_zombie,
            grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
            grpc_schedule_on_exec_ctx);
        GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
      } else {
        publish_call(server, calld, cq_idx, rc);
      }
      gpr_mu_lock(&server->mu_call);
    }
    gpr_mu_unlock(&server->mu_call);
  }
  return GRPC_CALL_OK;
}
1461
grpc_server_request_call(grpc_server * server,grpc_call ** call,grpc_call_details * details,grpc_metadata_array * initial_metadata,grpc_completion_queue * cq_bound_to_call,grpc_completion_queue * cq_for_notification,void * tag)1462 grpc_call_error grpc_server_request_call(
1463 grpc_server* server, grpc_call** call, grpc_call_details* details,
1464 grpc_metadata_array* initial_metadata,
1465 grpc_completion_queue* cq_bound_to_call,
1466 grpc_completion_queue* cq_for_notification, void* tag) {
1467 grpc_call_error error;
1468 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1469 grpc_core::ExecCtx exec_ctx;
1470 requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
1471 GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
1472 GRPC_API_TRACE(
1473 "grpc_server_request_call("
1474 "server=%p, call=%p, details=%p, initial_metadata=%p, "
1475 "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
1476 7,
1477 (server, call, details, initial_metadata, cq_bound_to_call,
1478 cq_for_notification, tag));
1479 size_t cq_idx;
1480 for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
1481 if (server->cqs[cq_idx] == cq_for_notification) {
1482 break;
1483 }
1484 }
1485 if (cq_idx == server->cq_count) {
1486 gpr_free(rc);
1487 error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
1488 goto done;
1489 }
1490 if (grpc_cq_begin_op(cq_for_notification, tag) == false) {
1491 gpr_free(rc);
1492 error = GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
1493 goto done;
1494 }
1495 details->reserved = nullptr;
1496 rc->cq_idx = cq_idx;
1497 rc->type = BATCH_CALL;
1498 rc->server = server;
1499 rc->tag = tag;
1500 rc->cq_bound_to_call = cq_bound_to_call;
1501 rc->call = call;
1502 rc->data.batch.details = details;
1503 rc->initial_metadata = initial_metadata;
1504 error = queue_call_request(server, cq_idx, rc);
1505 done:
1506
1507 return error;
1508 }
1509
/* Public API: requests notification of an incoming call to the registered
   method |rmp| (the handle returned by grpc_server_register_method).
   Validates that |cq_for_notification| is a registered server cq, that the
   presence of |optional_payload| matches the method's payload handling, and
   that the cq still accepts new ops; on success queues the request. */
grpc_call_error grpc_server_request_registered_call(
    grpc_server* server, void* rmp, grpc_call** call, gpr_timespec* deadline,
    grpc_metadata_array* initial_metadata, grpc_byte_buffer** optional_payload,
    grpc_completion_queue* cq_bound_to_call,
    grpc_completion_queue* cq_for_notification, void* tag) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
  requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
  registered_method* rm = static_cast<registered_method*>(rmp);
  GRPC_API_TRACE(
      "grpc_server_request_registered_call("
      "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
      "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
      "tag=%p)",
      9,
      (server, rmp, call, deadline, initial_metadata, optional_payload,
       cq_bound_to_call, cq_for_notification, tag));

  /* locate the notification cq among the server's registered cqs */
  size_t cq_idx;
  for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
    if (server->cqs[cq_idx] == cq_for_notification) {
      break;
    }
  }
  if (cq_idx == server->cq_count) {
    gpr_free(rc);
    return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
  }
  /* a payload pointer must be supplied exactly when the method expects one */
  if ((optional_payload == nullptr) !=
      (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)) {
    gpr_free(rc);
    return GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
  }

  if (grpc_cq_begin_op(cq_for_notification, tag) == false) {
    gpr_free(rc);
    return GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
  }
  rc->cq_idx = cq_idx;
  rc->type = REGISTERED_CALL;
  rc->server = server;
  rc->tag = tag;
  rc->cq_bound_to_call = cq_bound_to_call;
  rc->call = call;
  rc->data.registered.method = rm;
  rc->data.registered.deadline = deadline;
  rc->initial_metadata = initial_metadata;
  rc->data.registered.optional_payload = optional_payload;
  return queue_call_request(server, cq_idx, rc);
}
1561
/* Completes a requested call with |error| instead of a matched RPC: clears
   the out-params the requester is waiting on and posts the failure to the
   requesting cq. |error| must not be GRPC_ERROR_NONE; |rc| is handed to
   done_request_event through grpc_cq_end_op. */
static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
                      grpc_error* error) {
  *rc->call = nullptr;
  rc->initial_metadata->count = 0;
  GPR_ASSERT(error != GRPC_ERROR_NONE);

  grpc_cq_end_op(server->cqs[cq_idx], rc->tag, error, done_request_event, rc,
                 &rc->completion);
}
1571
/* Returns the channel args captured at grpc_server_create (owned by the
   server; callers must not free). */
const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
  return server->channel_args;
}
1575
/* Returns the resource user created in grpc_server_create from the channel
   args' resource quota, or null if none was configured. */
grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server) {
  return server->default_resource_user;
}
1579
grpc_server_has_open_connections(grpc_server * server)1580 int grpc_server_has_open_connections(grpc_server* server) {
1581 int r;
1582 gpr_mu_lock(&server->mu_global);
1583 r = server->root_channel_data.next != &server->root_channel_data;
1584 gpr_mu_unlock(&server->mu_global);
1585 return r;
1586 }
1587
grpc_server_get_channelz_node(grpc_server * server)1588 grpc_core::channelz::ServerNode* grpc_server_get_channelz_node(
1589 grpc_server* server) {
1590 if (server == nullptr) {
1591 return nullptr;
1592 }
1593 return server->channelz_server.get();
1594 }
1595