/* Copyright 2012-present Facebook, Inc.
 * Licensed under the Apache License, Version 2.0 */

#include "watchman.h"

/* Free a pending_fs node */
void w_pending_fs_free(struct watchman_pending_fs *p) {
  w_string_delref(p->path);
  free(p);
}

/* initialize a pending_coll */
bool w_pending_coll_init(struct watchman_pending_collection *coll) {
  coll->pending = NULL;
  coll->pinged = false;
  coll->pending_uniq = w_ht_new(WATCHMAN_BATCH_LIMIT, &w_ht_string_funcs);
  if (!coll->pending_uniq) {
    return false;
  }
  if (pthread_mutex_init(&coll->lock, NULL)) {
    // don't leak the hash table if mutex initialization fails
    w_ht_free(coll->pending_uniq);
    return false;
  }
  if (pthread_cond_init(&coll->cond, NULL)) {
    // undo the partial initialization
    pthread_mutex_destroy(&coll->lock);
    w_ht_free(coll->pending_uniq);
    return false;
  }
  return true;
}

/* destroy a pending_coll */
void w_pending_coll_destroy(struct watchman_pending_collection *coll) {
  w_pending_coll_drain(coll);
  w_ht_free(coll->pending_uniq);
  pthread_mutex_destroy(&coll->lock);
  pthread_cond_destroy(&coll->cond);
}
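
/* Illustrative sketch (comment only, not part of the build): the
 * expected lifecycle of a caller-owned collection, assuming every
 * successful init is paired with a destroy:
 *
 *   struct watchman_pending_collection coll;
 *
 *   if (!w_pending_coll_init(&coll)) {
 *     // handle allocation / pthread initialization failure
 *   }
 *   // ... add, pop and drain items ...
 *   w_pending_coll_destroy(&coll);
 */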

/* drain and discard the content of a pending_coll, but do not destroy it */
void w_pending_coll_drain(struct watchman_pending_collection *coll) {
  struct watchman_pending_fs *p;

  while ((p = w_pending_coll_pop(coll)) != NULL) {
    w_pending_fs_free(p);
  }

  w_ht_free_entries(coll->pending_uniq);
}

/* compute a deadline on entry, then obtain the collection lock
 * and wait until the deadline expires or until the collection is
 * pinged.  On return, the caller owns the collection lock. */
bool w_pending_coll_lock_and_wait(struct watchman_pending_collection *coll,
    int timeoutms) {
  struct timespec deadline;
  int errcode;

  if (timeoutms != -1) {
    w_timeoutms_to_abs_timespec(timeoutms, &deadline);
  }
  w_pending_coll_lock(coll);
  if (coll->pending || coll->pinged) {
    coll->pinged = false;
    return true;
  }
  if (timeoutms == -1) {
    errcode = pthread_cond_wait(&coll->cond, &coll->lock);
  } else {
    errcode = pthread_cond_timedwait(&coll->cond, &coll->lock, &deadline);
  }

  return errcode == 0;
}
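
/* Illustrative sketch: a consumer thread waits for work with a
 * timeout, pops whatever arrived, then drains to reset the uniq
 * hash; another thread wakes it early via w_pending_coll_ping().
 * handle_item() is hypothetical and stands in for whatever
 * processing the caller performs.
 *
 *   struct watchman_pending_fs *p;
 *
 *   if (w_pending_coll_lock_and_wait(coll, 100)) {
 *     while ((p = w_pending_coll_pop(coll)) != NULL) {
 *       handle_item(p);        // hypothetical consumer
 *       w_pending_fs_free(p);
 *     }
 *     w_pending_coll_drain(coll);
 *   }
 *   // the lock is held on return from lock_and_wait, even on timeout
 *   w_pending_coll_unlock(coll);
 */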

/* mark the collection as pinged and wake any threads waiting on it */
void w_pending_coll_ping(struct watchman_pending_collection *coll) {
  coll->pinged = true;
  pthread_cond_broadcast(&coll->cond);
}

/* obtain the collection lock */
void w_pending_coll_lock(struct watchman_pending_collection *coll) {
  int err = pthread_mutex_lock(&coll->lock);
  if (err != 0) {
    w_log(W_LOG_FATAL, "lock assertion: %s\n", strerror(err));
  }
}

/* release the collection lock */
void w_pending_coll_unlock(struct watchman_pending_collection *coll) {
  int err = pthread_mutex_unlock(&coll->lock);
  if (err != 0) {
    w_log(W_LOG_FATAL, "unlock assertion: %s\n", strerror(err));
  }
}

static inline void consolidate_item(struct watchman_pending_fs *p,
    int flags) {
  // Increase the strength of the pending item if either of these
  // flags is set.
  // We upgrade crawl-only as well as recursive; it indicates that
  // we've just performed the stat and want to avoid infinitely
  // re-running stat-and-crawl.
  p->flags |= flags & (W_PENDING_CRAWL_ONLY|W_PENDING_RECURSIVE);
}

/* add a pending entry.  Will consolidate an existing entry with the
 * same name.  Returns false if an allocation fails.
 * The caller must own the collection lock. */
bool w_pending_coll_add(struct watchman_pending_collection *coll,
    w_string_t *path, struct timeval now, int flags) {
  struct watchman_pending_fs *p;

  p = w_ht_val_ptr(w_ht_get(coll->pending_uniq, w_ht_ptr_val(path)));
  if (p) {
    /* Entry already exists: consolidate */
    consolidate_item(p, flags);
    /* all done */
    return true;
  }

  p = calloc(1, sizeof(*p));
  if (!p) {
    return false;
  }

  w_log(W_LOG_DBG, "add_pending: %.*s\n", (int)path->len, path->buf);

  p->flags = flags;
  p->now = now;
  p->path = path;
  w_string_addref(path);

  p->next = coll->pending;
  coll->pending = p;
  w_ht_set(coll->pending_uniq, w_ht_ptr_val(path), w_ht_ptr_val(p));

  return true;
}
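
/* Illustrative sketch: adding the same path twice consolidates into a
 * single queued entry whose flags are upgraded, rather than queueing a
 * duplicate.  Here path is assumed to be a live w_string_t and now a
 * struct timeval from gettimeofday().
 *
 *   w_pending_coll_lock(coll);
 *   w_pending_coll_add(coll, path, now, 0);
 *   w_pending_coll_add(coll, path, now, W_PENDING_RECURSIVE);
 *   // still one entry; its flags now include W_PENDING_RECURSIVE
 *   w_pending_coll_unlock(coll);
 */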

/* add a pending entry for name expressed relative to dir.
 * See w_pending_coll_add */
bool w_pending_coll_add_rel(struct watchman_pending_collection *coll,
    struct watchman_dir *dir, const char *name,
    struct timeval now, int flags)
{
  w_string_t *path_str;
  bool res;

  path_str = w_string_path_cat_cstr(dir->path, name);
  if (!path_str) {
    return false;
  }
  res = w_pending_coll_add(coll, path_str, now, flags);
  w_string_delref(path_str);

  return res;
}

/* Append the contents of src to target, consolidating in target.
 * src is effectively drained in the process.
 * Caller must own the lock on both src and target. */
void w_pending_coll_append(struct watchman_pending_collection *target,
    struct watchman_pending_collection *src) {
  struct watchman_pending_fs *p, *target_p;

  while ((p = w_pending_coll_pop(src)) != NULL) {
    target_p = w_ht_val_ptr(w_ht_get(target->pending_uniq,
                            w_ht_ptr_val(p->path)));
    if (target_p) {
      /* Entry already exists: consolidate */
      consolidate_item(target_p, p->flags);
      w_pending_fs_free(p);
      continue;
    }

    p->next = target->pending;
    target->pending = p;
    w_ht_set(target->pending_uniq, w_ht_ptr_val(p->path), w_ht_ptr_val(p));
  }

  w_ht_free_entries(src->pending_uniq);
  src->pending = NULL;
}
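
/* Illustrative sketch: merging a freshly gathered collection into a
 * shared one.  Both locks must be held; taking them in a fixed order
 * (shown here as target before src, which is an assumption and not a
 * contract of this file) avoids lock-order deadlocks.
 *
 *   w_pending_coll_lock(target);
 *   w_pending_coll_lock(src);
 *   w_pending_coll_append(target, src);
 *   w_pending_coll_unlock(src);
 *   w_pending_coll_unlock(target);
 *   // src is now empty; target owns all the items
 */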

/* Logically pop an entry from the collection.
 * Does NOT remove the entry from the uniq hash.
 * The intent is that the caller will call this in a tight loop and
 * then _drain() it at the end to clear the uniq hash */
struct watchman_pending_fs *w_pending_coll_pop(
    struct watchman_pending_collection *coll) {
  struct watchman_pending_fs *p = coll->pending;

  if (p) {
    coll->pending = p->next;
    p->next = NULL;
  }

  return p;
}
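
/* Illustrative sketch: because pop leaves the uniq hash intact, the
 * popped entries still count toward the collection's size until it is
 * drained:
 *
 *   struct watchman_pending_fs *p;
 *
 *   while ((p = w_pending_coll_pop(coll)) != NULL) {
 *     w_pending_fs_free(p);
 *   }
 *   // w_pending_coll_size(coll) is unchanged at this point
 *   w_pending_coll_drain(coll);
 *   // now w_pending_coll_size(coll) == 0
 */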

/* Returns the number of unique pending items in the collection */
uint32_t w_pending_coll_size(struct watchman_pending_collection *coll) {
  return w_ht_size(coll->pending_uniq);
}

/* vim:ts=2:sw=2:et:
 */