/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_H
#define GRPC_CORE_LIB_TRANSPORT_METADATA_H

#include <grpc/support/port_platform.h>

#include <grpc/impl/codegen/log.h>

#include <grpc/grpc.h>
#include <grpc/slice.h>

#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/slice/slice_utils.h"

extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata;
/* This file provides a mechanism for tracking metadata through the grpc stack.
   It's not intended for consumption outside of the library.

   Metadata is tracked in the context of a sharded global grpc_mdctx. The
   context tracks unique strings (grpc_mdstr) and pairs of strings
   (grpc_mdelem). Any of these objects can be checked for equality by comparing
   their pointers. These objects are reference counted.

   grpc_mdelem can additionally store a (non-NULL) user data pointer. This
   pointer is intended to be used to cache semantic meaning of a metadata
   element. For example, an OAuth token may cache the credentials it represents
   and the time at which it expires in the mdelem user data.

   Combining this metadata cache and the hpack compression table allows us to
   simply look up complete preparsed objects quickly, incurring a few atomic
   ops per metadata element on the fast path.

   grpc_mdelem instances MAY live longer than their refcount implies, and are
   garbage collected periodically, meaning cached data can easily outlive a
   single request.

   STATIC METADATA: in static_metadata.h we declare a set of static metadata.
   These mdelems and mdstrs are available via pre-declared, code-generated
   macros and are available to code anywhere between grpc_init() and
   grpc_shutdown(). They are not refcounted, but can be passed to _ref and
   _unref functions declared here - in which case those functions are
   effectively no-ops. */
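
/* Illustrative sketch (not part of the API, example data only): the typical
   lifecycle is to build an element from slices, read it back through the
   accessor macros declared below, and release it with GRPC_MDELEM_UNREF.

     grpc_mdelem md = grpc_mdelem_from_slices(
         grpc_slice_from_static_string("user-agent"),
         grpc_slice_from_copied_string("example-client/1.0"));
     if (!GRPC_MDISNULL(md)) {
       GPR_ASSERT(grpc_slice_eq(GRPC_MDKEY(md),
                                grpc_slice_from_static_string("user-agent")));
     }
     GRPC_MDELEM_UNREF(md);
*/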

/* Forward declarations */
typedef struct grpc_mdelem grpc_mdelem;

/* if changing this, make identical changes in:
   - grpc_core::{InternedMetadata, AllocatedMetadata}
   - grpc_metadata in grpc_types.h */
typedef struct grpc_mdelem_data {
  const grpc_slice key;
  const grpc_slice value;
  /* there is a private part to this in metadata.c */
} grpc_mdelem_data;

/* GRPC_MDELEM_STORAGE_* enum values that can be treated as interned always have
   this bit set in their integer value */
#define GRPC_MDELEM_STORAGE_INTERNED_BIT 1

/* External and static storage metadata has no refcount to ref/unref. Allocated
 * and interned metadata do have a refcount. Metadata ref and unref methods use
 * a switch statement on this enum to determine which behaviour to execute.
 * Keeping the no-ref cases together and the ref-cases together leads to
 * slightly better code generation (9 inlined instructions rather than 10). */
typedef enum {
  /* memory pointed to by grpc_mdelem::payload is owned by an external system */
  GRPC_MDELEM_STORAGE_EXTERNAL = 0,
  /* memory is in the static metadata table */
  GRPC_MDELEM_STORAGE_STATIC = GRPC_MDELEM_STORAGE_INTERNED_BIT,
  /* memory pointed to by grpc_mdelem::payload is allocated by the metadata
     system */
  GRPC_MDELEM_STORAGE_ALLOCATED = 2,
  /* memory pointed to by grpc_mdelem::payload is interned by the metadata
     system */
  GRPC_MDELEM_STORAGE_INTERNED = 2 | GRPC_MDELEM_STORAGE_INTERNED_BIT,
} grpc_mdelem_data_storage;

struct grpc_mdelem {
  /* a grpc_mdelem_data* generally, with the two lower bits signalling memory
     ownership as per grpc_mdelem_data_storage */
  uintptr_t payload;
};

#define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3))
#define GRPC_MDELEM_STORAGE(md) \
  ((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
#ifdef __cplusplus
#define GRPC_MAKE_MDELEM(data, storage) \
  (grpc_mdelem{((uintptr_t)(data)) | ((uintptr_t)storage)})
#else
#define GRPC_MAKE_MDELEM(data, storage) \
  ((grpc_mdelem){((uintptr_t)(data)) | ((uintptr_t)storage)})
#endif
#define GRPC_MDELEM_IS_INTERNED(md)          \
  ((grpc_mdelem_data_storage)((md).payload & \
                              (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
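
/* Illustrative usage sketch (not part of the API): since the two low bits of
   payload encode the storage class, storage checks are plain bit tests on the
   tagged pointer.

     grpc_mdelem md = ...;  // obtained from one of the constructors below
     if (GRPC_MDELEM_IS_INTERNED(md)) {
       // STATIC or INTERNED storage.
     }
     switch (GRPC_MDELEM_STORAGE(md)) {
       case GRPC_MDELEM_STORAGE_EXTERNAL:
       case GRPC_MDELEM_STORAGE_STATIC:
         break;  // no refcount to manage
       case GRPC_MDELEM_STORAGE_INTERNED:
       case GRPC_MDELEM_STORAGE_ALLOCATED:
         break;  // refcounted; pair refs with GRPC_MDELEM_UNREF
     }
*/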

/* Given arbitrary input slices, create a grpc_mdelem object. The caller refs
 * the input slices; we unref them. This method is always safe to call;
 * however, if we know something about the slices in question (e.g. that the
 * key is static) we can call specializations that save on cycle count. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
                                    const grpc_slice& value);
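
/* Illustrative sketch (not part of the API): the refs on the slices handed to
   grpc_mdelem_from_slices are consumed by the call, so only the resulting
   mdelem needs to be released.

     grpc_slice key = grpc_slice_from_copied_string("x-example-key");
     grpc_slice value = grpc_slice_from_copied_string("some value");
     grpc_mdelem md = grpc_mdelem_from_slices(key, value);
     // No grpc_slice_unref(key/value) here: their refs were transferred.
     GRPC_MDELEM_UNREF(md);
*/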

/* Like grpc_mdelem_from_slices, but we know that key is a static slice. This
   saves us a few branches and a no-op call to md_unref() for the key. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
                                    const grpc_slice& value);

/* Like grpc_mdelem_from_slices, but key is static and val is static. */
grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::StaticMetadataSlice& value);

/* Like grpc_mdelem_from_slices, but key is static and val is interned. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
                                    const grpc_core::ManagedMemorySlice& value);

/* Like grpc_mdelem_from_slices, but key and val are interned. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::ManagedMemorySlice& key,
                                    const grpc_core::ManagedMemorySlice& value);

/* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
   object as backing storage (so lifetimes should align) */
grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata);
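
/* Illustrative sketch (not part of the API): converting application-supplied
   grpc_metadata entries. Because an entry may serve as backing storage, the
   grpc_metadata array must stay alive for as long as the mdelem is in use.
   `recv_md` below is a hypothetical grpc_metadata_array.

     for (size_t i = 0; i < recv_md.count; ++i) {
       grpc_mdelem md = grpc_mdelem_from_grpc_metadata(&recv_md.metadata[i]);
       // ... use md while recv_md is still alive ...
       GRPC_MDELEM_UNREF(md);
     }
*/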

/* Does not unref the slices; if a new non-interned mdelem is needed, allocates
   one if compatible_external_backing_store is NULL, or uses
   compatible_external_backing_store if it is non-NULL (in which case it's the
   user's responsibility to ensure that it outlives usage) */
grpc_mdelem grpc_mdelem_create(
    const grpc_slice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store);
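
/* Illustrative sketch (not part of the API): unlike grpc_mdelem_from_slices,
   grpc_mdelem_create leaves the caller's slice refs untouched; a caller that
   owns suitably laid-out storage may also pass it as the backing store to
   avoid an allocation for non-interned elements.

     grpc_slice key = grpc_slice_from_static_string("x-example-key");
     grpc_slice value = grpc_slice_from_static_string("value");
     grpc_mdelem md = grpc_mdelem_create(key, value, nullptr);
     // key/value still hold their refs here (static slices need no unref).
     GRPC_MDELEM_UNREF(md);
*/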

/* Like grpc_mdelem_create, but we know that key is static. */
grpc_mdelem grpc_mdelem_create(
    const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store);

#define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
#define GRPC_MDVALUE(md) (GRPC_MDELEM_DATA(md)->value)

bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
/* Often we compare metadata where we know a priori that the second parameter is
 * static, and that the keys match. This most commonly happens when processing
 * metadata batch callouts in initial/trailing filters. In this case, we can
 * fast-path grpc_mdelem_eq and remove unnecessary checks. */
inline bool grpc_mdelem_static_value_eq(grpc_mdelem a, grpc_mdelem b_static) {
  if (a.payload == b_static.payload) return true;
  return grpc_slice_eq_static_interned(GRPC_MDVALUE(a), GRPC_MDVALUE(b_static));
}
#define GRPC_MDISNULL(md) (GRPC_MDELEM_DATA(md) == NULL)

inline bool grpc_mdelem_both_interned_eq(grpc_mdelem a_interned,
                                         grpc_mdelem b_interned) {
  GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(a_interned) ||
                   GRPC_MDISNULL(a_interned));
  GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(b_interned) ||
                   GRPC_MDISNULL(b_interned));
  return a_interned.payload == b_interned.payload;
}

/* Mutator and accessor for grpc_mdelem user data. The destructor function
   is used as a type tag and is checked during user_data fetch. */
void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
                                void* data);
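
/* Illustrative sketch (not part of the API): caching a parsed representation on
   an element so later lookups skip re-parsing; the destroy function doubles as
   the type tag. `parse_timeout` and `cached_timeout_destroy` are hypothetical
   helpers, not declared in this header.

     static void cached_timeout_destroy(void* p) { gpr_free(p); }

     grpc_millis* t = static_cast<grpc_millis*>(
         grpc_mdelem_get_user_data(md, cached_timeout_destroy));
     if (t == nullptr) {
       t = static_cast<grpc_millis*>(gpr_malloc(sizeof(*t)));
       *t = parse_timeout(GRPC_MDVALUE(md));
       // set_user_data returns the value actually stored (another thread may
       // have raced and won); use its return value going forward.
       t = static_cast<grpc_millis*>(
           grpc_mdelem_set_user_data(md, cached_timeout_destroy, t));
     }
*/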

// Defined in metadata.cc.
struct mdtab_shard;

#ifndef NDEBUG
void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
                           const grpc_slice& value, intptr_t refcnt,
                           const char* file, int line);
void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
                             const grpc_slice& value, intptr_t refcnt,
                             const char* file, int line);
#endif
namespace grpc_core {

typedef void (*destroy_user_data_func)(void* data);

struct UserData {
  Mutex mu_user_data;
  grpc_core::Atomic<destroy_user_data_func> destroy_user_data;
  grpc_core::Atomic<void*> data;
};

class StaticMetadata {
 public:
  StaticMetadata(const grpc_slice& key, const grpc_slice& value, uintptr_t idx)
      : kv_({key, value}), hash_(0), static_idx_(idx) {}

  const grpc_mdelem_data& data() const { return kv_; }

  void HashInit();
  uint32_t hash() { return hash_; }
  uintptr_t StaticIndex() { return static_idx_; }

 private:
  grpc_mdelem_data kv_;

  /* private only data */
  uint32_t hash_;
  uintptr_t static_idx_;
};

class RefcountedMdBase {
 public:
  RefcountedMdBase(const grpc_slice& key, const grpc_slice& value)
      : key_(key), value_(value), refcnt_(1) {}
  RefcountedMdBase(const grpc_slice& key, const grpc_slice& value,
                   uint32_t hash)
      : key_(key), value_(value), refcnt_(1), hash_(hash) {}

  const grpc_slice& key() const { return key_; }
  const grpc_slice& value() const { return value_; }
  uint32_t hash() { return hash_; }

#ifndef NDEBUG
  void Ref(const char* file, int line) {
    grpc_mdelem_trace_ref(this, key_, value_, RefValue(), file, line);
    const intptr_t prior = refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
    GPR_ASSERT(prior > 0);
  }
  bool Unref(const char* file, int line) {
    grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
    return Unref();
  }
#endif
  void Ref() {
    /* we can assume the ref count is >= 1 as the application is calling
       this function - meaning that no adjustment to mdtab_free is necessary,
       simplifying the logic here to be just an atomic increment */
    refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
  }
  bool Unref() {
    const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
    GPR_DEBUG_ASSERT(prior > 0);
    return prior == 1;
  }

 protected:
#ifndef NDEBUG
  void TraceAtStart(const char* tag);
#endif

  intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
  bool AllRefsDropped() { return refcnt_.Load(MemoryOrder::ACQUIRE) == 0; }
  bool FirstRef() { return refcnt_.FetchAdd(1, MemoryOrder::RELAXED) == 0; }

 private:
  /* must be byte compatible with grpc_mdelem_data */
  grpc_slice key_;
  grpc_slice value_;
  grpc_core::Atomic<intptr_t> refcnt_;
  uint32_t hash_ = 0;
};

class InternedMetadata : public RefcountedMdBase {
 public:
  // TODO(arjunroy): Change to use strongly typed slices instead.
  struct NoRefKey {};
  struct BucketLink {
    explicit BucketLink(InternedMetadata* md) : next(md) {}

    InternedMetadata* next = nullptr;
  };
  InternedMetadata(const grpc_slice& key, const grpc_slice& value,
                   uint32_t hash, InternedMetadata* next);
  InternedMetadata(const grpc_slice& key, const grpc_slice& value,
                   uint32_t hash, InternedMetadata* next, const NoRefKey*);

  ~InternedMetadata();
  void RefWithShardLocked(mdtab_shard* shard);
  UserData* user_data() { return &user_data_; }
  InternedMetadata* bucket_next() { return link_.next; }
  void set_bucket_next(InternedMetadata* md) { link_.next = md; }

  static size_t CleanupLinkedMetadata(BucketLink* head);

 private:
  UserData user_data_;
  BucketLink link_;
};

/* Shadow structure for grpc_mdelem_data for allocated elements */
class AllocatedMetadata : public RefcountedMdBase {
 public:
  // TODO(arjunroy): Change to use strongly typed slices instead.
  struct NoRefKey {};
  AllocatedMetadata(const grpc_slice& key, const grpc_slice& value);
  AllocatedMetadata(const grpc_core::ManagedMemorySlice& key,
                    const grpc_core::UnmanagedMemorySlice& value);
  AllocatedMetadata(const grpc_core::ExternallyManagedSlice& key,
                    const grpc_core::UnmanagedMemorySlice& value);
  AllocatedMetadata(const grpc_slice& key, const grpc_slice& value,
                    const NoRefKey*);
  ~AllocatedMetadata();

  UserData* user_data() { return &user_data_; }

 private:
  UserData user_data_;
};

}  // namespace grpc_core

#ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd, const char* file,
                                   int line) {
#else  // ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd) {
#endif  // ifndef NDEBUG
  switch (GRPC_MDELEM_STORAGE(gmd)) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
    case GRPC_MDELEM_STORAGE_STATIC:
      break;
    case GRPC_MDELEM_STORAGE_INTERNED: {
      auto* md =
          reinterpret_cast<grpc_core::InternedMetadata*> GRPC_MDELEM_DATA(gmd);
      /* debug builds route through the tracing Ref() overload */
#ifndef NDEBUG
      md->Ref(file, line);
#else
      md->Ref();
#endif
      break;
    }
    case GRPC_MDELEM_STORAGE_ALLOCATED: {
      auto* md =
          reinterpret_cast<grpc_core::AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
      md->Ref(file, line);
#else
      md->Ref();
#endif
      break;
    }
  }
  return gmd;
}

#ifndef NDEBUG
#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
                                uint32_t hash, const char* file, int line);
inline void grpc_mdelem_unref(grpc_mdelem gmd, const char* file, int line) {
#else
#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
                                uint32_t hash);
inline void grpc_mdelem_unref(grpc_mdelem gmd) {
#endif
  const grpc_mdelem_data_storage storage = GRPC_MDELEM_STORAGE(gmd);
  switch (storage) {
    case GRPC_MDELEM_STORAGE_EXTERNAL:
    case GRPC_MDELEM_STORAGE_STATIC:
      return;
    case GRPC_MDELEM_STORAGE_INTERNED:
    case GRPC_MDELEM_STORAGE_ALLOCATED:
      auto* md =
          reinterpret_cast<grpc_core::RefcountedMdBase*> GRPC_MDELEM_DATA(gmd);
      /* once the refcount hits zero, some other thread can come along and
         free an interned md at any time: it's unsafe from this point on to
         access it so we read the hash now. */
      uint32_t hash = md->hash();
#ifndef NDEBUG
      if (GPR_UNLIKELY(md->Unref(file, line))) {
        grpc_mdelem_on_final_unref(storage, md, hash, file, line);
#else
      if (GPR_UNLIKELY(md->Unref())) {
        grpc_mdelem_on_final_unref(storage, md, hash);
#endif
      }
      return;
  }
}

#define GRPC_MDNULL GRPC_MAKE_MDELEM(NULL, GRPC_MDELEM_STORAGE_EXTERNAL)

/* We add 32 bytes of padding as per RFC-7540 section 6.5.2. */
#define GRPC_MDELEM_LENGTH(e)                                                  \
  (GRPC_SLICE_LENGTH(GRPC_MDKEY((e))) + GRPC_SLICE_LENGTH(GRPC_MDVALUE((e))) + \
   32)
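
/* For example, a hypothetical element with key "content-type" (12 bytes) and
   value "application/grpc" (16 bytes) would count as 12 + 16 + 32 = 60 bytes
   toward header-list size accounting. */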

#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))

void grpc_mdctx_global_init(void);
void grpc_mdctx_global_shutdown();

/* Like grpc_mdelem_from_slices, but we know that key is a static or interned
   slice and value is not static or interned. This gives us an inlinable
   fastpath - we know we must allocate metadata now, and that we do not need to
   unref the value (rather, we just transfer the ref). We can avoid a ref since:
   1) the key slice is passed in already ref'd
   2) We're guaranteed to create a new AllocatedMetadata, thus meaning the
      ref can be considered 'transferred'. */
inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::ManagedMemorySlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(new AllocatedMetadata(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}

inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::ExternallyManagedSlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(new AllocatedMetadata(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}

inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(new AllocatedMetadata(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}

#endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_H */