1 // This file is part of BOINC.
2 // http://boinc.berkeley.edu
3 // Copyright (C) 2013 University of California
4 //
5 // BOINC is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU Lesser General Public License
7 // as published by the Free Software Foundation,
8 // either version 3 of the License, or (at your option) any later version.
9 //
10 // BOINC is distributed in the hope that it will be useful,
11 // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 // See the GNU Lesser General Public License for more details.
14 //
15 // You should have received a copy of the GNU Lesser General Public License
16 // along with BOINC. If not, see <http://www.gnu.org/licenses/>.
17
18 #if defined(__GNUG__) && !defined(__APPLE__)
19 #pragma implementation "AsyncRPC.h"
20 #endif
21
22 #if !(defined(_WIN32) || (defined(__WXMAC__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4)))
23 #include <xlocale.h>
24 #endif
25
26 #include "stdwx.h"
27 #include "BOINCGUIApp.h"
28 #include "MainDocument.h"
29 #include "AsyncRPC.h"
30 #include "BOINCBaseFrame.h"
31 #include "BOINCTaskBar.h"
32 #include "error_numbers.h"
33 #include "SkinManager.h"
34 #include "DlgEventLog.h"
35 #include "util.h"
36
37 extern bool s_bSkipExitConfirmation;
38
39 // Delay in milliseconds before showing AsyncRPCDlg
40 #define RPC_WAIT_DLG_DELAY 1500
41 // How often to check for events when minimized and waiting for Demand RPC
42 #define DELAY_WHEN_MINIMIZED 500
43 // Delay in milliseconds to allow thread to exit before killing it
44 #define RPC_KILL_DELAY 2000
45
ASYNC_RPC_REQUEST()46 ASYNC_RPC_REQUEST::ASYNC_RPC_REQUEST() {
47 clear();
48 }
49
50
~ASYNC_RPC_REQUEST()51 ASYNC_RPC_REQUEST::~ASYNC_RPC_REQUEST() {
52 clear();
53 }
54
55
clear()56 void ASYNC_RPC_REQUEST::clear() {
57 rpcType = (ASYNC_RPC_TYPE) 0;
58 which_rpc = (RPC_SELECTOR) 0;
59 exchangeBuf = NULL;
60 arg1 = NULL;
61 arg2 = NULL;
62 arg3 = NULL;
63 arg4 = NULL;
64 completionTime = NULL;
65 RPCExecutionTime = NULL;
66 resultPtr = NULL;
67 retval = 0;
68 isActive = false;
69 }
70
71
isSameAs(ASYNC_RPC_REQUEST & otherRequest)72 bool ASYNC_RPC_REQUEST::isSameAs(ASYNC_RPC_REQUEST& otherRequest) {
73 if (which_rpc != otherRequest.which_rpc) return false;
74 if (arg1 != otherRequest.arg1) return false;
75 if (exchangeBuf != otherRequest.exchangeBuf) return false;
76 if (arg2 != otherRequest.arg2) return false;
77 if (arg3 != otherRequest.arg3) return false;
78 if (arg4 != otherRequest.arg4) return false;
79 if (rpcType != otherRequest.rpcType) return false;
80 if (completionTime != otherRequest.completionTime) return false;
81 if (resultPtr != otherRequest.resultPtr) return false;
82 // OK if isActive and retval don't match.
83 return true;
84 }
85
86
AsyncRPC(CMainDocument * pDoc)87 AsyncRPC::AsyncRPC(CMainDocument *pDoc) {
88 m_pDoc = pDoc;
89 }
90
91
~AsyncRPC()92 AsyncRPC::~AsyncRPC() {}
93
94
RPC_Wait(RPC_SELECTOR which_rpc,void * arg1,void * arg2,void * arg3,void * arg4,bool hasPriority)95 int AsyncRPC::RPC_Wait(RPC_SELECTOR which_rpc, void *arg1, void *arg2,
96 void *arg3, void *arg4, bool hasPriority
97 ) {
98 ASYNC_RPC_REQUEST request;
99 int retval = 0;
100
101 request.which_rpc = which_rpc;
102 request.arg1 = arg1;
103 request.arg2 = arg2;
104 request.arg3 = arg3;
105 request.arg4 = arg4;
106 if (which_rpc == RPC_QUIT) {
107 request.rpcType = RPC_TYPE_ASYNC_NO_REFRESH;
108 } else {
109 request.rpcType = RPC_TYPE_WAIT_FOR_COMPLETION;
110 }
111 request.RPCExecutionTime = NULL;
112 retval = m_pDoc->RequestRPC(request, hasPriority);
113 return retval;
114 }
115
116
RPCThread(CMainDocument * pDoc,BOINC_Mutex * pRPC_Thread_Mutex,BOINC_Condition * pRPC_Thread_Condition,BOINC_Mutex * pRPC_Request_Mutex,BOINC_Condition * pRPC_Request_Condition)117 RPCThread::RPCThread(CMainDocument *pDoc,
118 BOINC_Mutex* pRPC_Thread_Mutex,
119 BOINC_Condition* pRPC_Thread_Condition,
120 BOINC_Mutex* pRPC_Request_Mutex,
121 BOINC_Condition* pRPC_Request_Condition)
122 : wxThread() {
123 m_pDoc = pDoc;
124 m_pRPC_Thread_Mutex = pRPC_Thread_Mutex;
125 m_pRPC_Thread_Condition = pRPC_Thread_Condition;
126 m_pRPC_Request_Mutex = pRPC_Request_Mutex;
127 m_pRPC_Request_Condition = pRPC_Request_Condition;
128 }
129
// Main loop of the RPC worker thread.
//
// The thread sleeps on m_pRPC_Thread_Condition until the main thread
// signals it (either a request is ready in current_rpc_request, or
// m_bShutDownRPCThread was set).  For each request it runs
// ProcessRPCRequest(), records the execution time if asked to, then
// notifies the main thread twice: a wxEVT_RPC_FINISHED event (handled
// when the event loop runs) and a Signal() on m_pRPC_Request_Condition
// (handled when the main thread is blocked waiting for a demand RPC).
void *RPCThread::Entry() {
    int retval = 0;
    CRPCFinishedEvent RPC_done_event( wxEVT_RPC_FINISHED );
    ASYNC_RPC_REQUEST *current_request;
    double startTime = 0;
    wxMutexError mutexErr = wxMUTEX_NO_ERROR;
    wxCondError condErr = wxCOND_NO_ERROR;

#ifndef NO_PER_THREAD_LOCALE
#ifdef __WXMSW__
    // On Windows, set all locales for this thread on a per-thread basis
    _configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
    setlocale(LC_ALL, "C");
#else
    // We initialize RPC_Thread_Locale to fix a compiler warning
    locale_t RPC_Thread_Locale = LC_GLOBAL_LOCALE;
#if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4)
    if (uselocale)    // uselocale() is not available in Mac OS 10.3.9
#endif
    {
        // On Mac / Unix / Linux, set "C" locale for this thread only,
        // so RPC XML parsing is not affected by the user's locale.
        RPC_Thread_Locale = newlocale(LC_ALL_MASK, "C", NULL);
        uselocale(RPC_Thread_Locale);
    }
#endif      // ifndef __WXMSW__
#endif      // ifndef NO_PER_THREAD_LOCALE

    m_pRPC_Thread_Mutex->Lock();
    m_pDoc->m_bRPCThreadIsReady = true;
    while(true) {
        // Wait for main thread to wake us
        // This does the following:
        // (1) Unlocks the Mutex and puts the RPC thread to sleep as an atomic operation.
        // (2) On Signal from main thread: locks Mutex again and wakes the RPC thread.
        condErr = m_pRPC_Thread_Condition->Wait();
        wxASSERT(condErr == wxCOND_NO_ERROR);

        if (m_pDoc->m_bShutDownRPCThread) {
            // Graceful shutdown requested by KillRPCThread().
#if !defined(NO_PER_THREAD_LOCALE) && !defined(__WXMSW__)
#if defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_4)
        if (uselocale)    // uselocale() is not available in Mac OS 10.3.9
#endif
            {
                // Restore the global locale and free the thread-local one.
                uselocale(LC_GLOBAL_LOCALE);
                freelocale(RPC_Thread_Locale);
            }
#endif
            m_pRPC_Thread_Mutex->Unlock(); // Just for safety - not really needed
            // Tell CMainDocument that thread has gracefully ended
            // We do this here because OnExit() is not called on Windows
            m_pDoc->m_RPCThread = NULL;
            return 0;
        }

        current_request = m_pDoc->GetCurrentRPCRequest();

        if (!current_request->isActive)  continue;       // Should never happen

        if (current_request->RPCExecutionTime) {
            startTime = dtime();
        }
        retval = ProcessRPCRequest();
        if (current_request->RPCExecutionTime) {
            *(current_request->RPCExecutionTime) = dtime() - startTime;
        }

        current_request->retval = retval;

        // Take the request mutex so that clearing isActive, posting the
        // event and signaling the condition appear atomic to the main thread.
        mutexErr = m_pRPC_Request_Mutex->Lock();
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

        current_request->isActive = false;
        wxPostEvent( wxTheApp, RPC_done_event );

        // Signal() is ignored / discarded unless the main thread is
        // currently blocked by m_pRPC_Request_Condition->Wait[Timeout]()
        m_pRPC_Request_Condition->Signal();

        mutexErr = m_pRPC_Request_Mutex->Unlock();
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
    }

    // Not reached; the shutdown path above returns from inside the loop.
    return NULL;
}
214
215
// Dispatch the current request to the matching GUI_RPC_CLIENT method.
// Runs on the RPC thread only.  The void* args were stored by the caller
// that queued the request, so each case casts them back to the types that
// selector's rpcClient method expects.  Returns the rpcClient result code.
int RPCThread::ProcessRPCRequest() {
    int                     retval = 0;
    ASYNC_RPC_REQUEST       *current_request = m_pDoc->GetCurrentRPCRequest();

    // Sanity check: every selector except the no-argument ones must have
    // been queued with a non-NULL arg1.
    switch (current_request->which_rpc) {
    // RPC_SELECTORS with no arguments
    case RPC_RUN_BENCHMARKS:
    case RPC_QUIT:
    case RPC_NETWORK_AVAILABLE:
    case RPC_PROJECT_ATTACH_FROM_FILE:
    case RPC_READ_GLOBAL_PREFS_OVERRIDE:
    case RPC_READ_CC_CONFIG:
        break;
    default:
        // All others must have at least one argument
        if (current_request->arg1 == NULL) {
            wxASSERT(false);
            return -1;
        }
        break;
    }
    switch (current_request->which_rpc) {
    case RPC_AUTHORIZE:
        retval = (m_pDoc->rpcClient).authorize((const char*)(current_request->arg1));
        break;
    case RPC_EXCHANGE_VERSIONS:
        retval = (m_pDoc->rpcClient).exchange_versions(*(VERSION_INFO*)(current_request->arg1));
        break;
    case RPC_GET_STATE:
        retval = (m_pDoc->rpcClient).get_state(*(CC_STATE*)(current_request->arg1));
        break;
    case RPC_GET_RESULTS:
        retval = (m_pDoc->rpcClient).get_results(*(RESULTS*)(current_request->arg1), *(bool*)(current_request->arg2));
        break;
    case RPC_GET_FILE_TRANSFERS:
        retval = (m_pDoc->rpcClient).get_file_transfers(*(FILE_TRANSFERS*)(current_request->arg1));
        break;
    case RPC_GET_SIMPLE_GUI_INFO1:
        retval = (m_pDoc->rpcClient).get_simple_gui_info(*(SIMPLE_GUI_INFO*)(current_request->arg1));
        break;
    case RPC_GET_SIMPLE_GUI_INFO2:
        // RPC_GET_SIMPLE_GUI_INFO2 is equivalent to doing both
        // RPC_GET_PROJECT_STATUS1 and RPC_GET_RESULTS
        retval = (m_pDoc->rpcClient).get_results(*(RESULTS*)(current_request->arg3), *(bool*)(current_request->arg4));
        if (!retval) {
            retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1));
        }
        break;
    case RPC_GET_PROJECT_STATUS1:
        retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1));
        break;
    case RPC_GET_PROJECT_STATUS2:
        retval = (m_pDoc->rpcClient).get_project_status(*(PROJECTS*)(current_request->arg1));
        break;
    case RPC_GET_ALL_PROJECTS_LIST:
        retval = (m_pDoc->rpcClient).get_all_projects_list(*(ALL_PROJECTS_LIST*)(current_request->arg1));
        break;
    case RPC_GET_DISK_USAGE:
        retval = (m_pDoc->rpcClient).get_disk_usage(*(DISK_USAGE*)(current_request->arg1));
        break;
    case RPC_PROJECT_OP:
        retval = (m_pDoc->rpcClient).project_op(
            *(PROJECT*)(current_request->arg1),
            (const char*)(current_request->arg2)
        );
        break;
    case RPC_SET_RUN_MODE:
        retval = (m_pDoc->rpcClient).set_run_mode(
            *(int*)(current_request->arg1),
            *(double*)(current_request->arg2)
        );
        break;
    case RPC_SET_GPU_MODE:
        retval = (m_pDoc->rpcClient).set_gpu_mode(
            *(int*)(current_request->arg1),
            *(double*)(current_request->arg2)
        );
        break;
    case RPC_SET_NETWORK_MODE:
        retval = (m_pDoc->rpcClient).set_network_mode(
            *(int*)(current_request->arg1),
            *(double*)(current_request->arg2)
        );
        break;
    case RPC_GET_SCREENSAVER_TASKS:
        retval = (m_pDoc->rpcClient).get_screensaver_tasks(
            *(int*)(current_request->arg1),
            *(RESULTS*)(current_request->arg2)
        );
        break;
    case RPC_RUN_BENCHMARKS:
        retval = (m_pDoc->rpcClient).run_benchmarks();
        break;
    case RPC_SET_PROXY_SETTINGS:
        retval = (m_pDoc->rpcClient).set_proxy_settings(*(GR_PROXY_INFO*)(current_request->arg1));
        break;
    case RPC_GET_PROXY_SETTINGS:
        retval = (m_pDoc->rpcClient).get_proxy_settings(*(GR_PROXY_INFO*)(current_request->arg1));
        break;
    case RPC_GET_NOTICES:
        retval = (m_pDoc->rpcClient).get_notices(
            *(int*)(current_request->arg1),
            *(NOTICES*)(current_request->arg2)
        );
        break;
    case RPC_GET_MESSAGES:
        retval = (m_pDoc->rpcClient).get_messages(
            *(int*)(current_request->arg1),
            *(MESSAGES*)(current_request->arg2),
            *(bool*)(current_request->arg3)
        );
        break;
    case RPC_FILE_TRANSFER_OP:
        retval = (m_pDoc->rpcClient).file_transfer_op(
            *(FILE_TRANSFER*)(current_request->arg1),
            (const char*)(current_request->arg2)
        );
        break;
    case RPC_RESULT_OP:
        retval = (m_pDoc->rpcClient).result_op(
            *(RESULT*)(current_request->arg1),
            (const char*)(current_request->arg2)
        );
        break;
    case RPC_GET_HOST_INFO:
        retval = (m_pDoc->rpcClient).get_host_info(*(HOST_INFO*)(current_request->arg1));
        break;
    case RPC_QUIT:
        retval = (m_pDoc->rpcClient).quit();
        break;
    case RPC_ACCT_MGR_INFO:
        retval = (m_pDoc->rpcClient).acct_mgr_info(*(ACCT_MGR_INFO*)(current_request->arg1));
        break;
    case RPC_GET_STATISTICS:
        retval = (m_pDoc->rpcClient).get_statistics(*(PROJECTS*)(current_request->arg1));
        break;
    case RPC_NETWORK_AVAILABLE:
        retval = (m_pDoc->rpcClient).network_available();
        break;
    case RPC_GET_PROJECT_INIT_STATUS:
        retval = (m_pDoc->rpcClient).get_project_init_status(*(PROJECT_INIT_STATUS*)(current_request->arg1));
        break;
    case RPC_GET_PROJECT_CONFIG:
        retval = (m_pDoc->rpcClient).get_project_config(*(std::string*)(current_request->arg1));
        break;
    case RPC_GET_PROJECT_CONFIG_POLL:
        retval = (m_pDoc->rpcClient).get_project_config_poll(*(PROJECT_CONFIG*)(current_request->arg1));
        break;
    case RPC_LOOKUP_ACCOUNT:
        retval = (m_pDoc->rpcClient).lookup_account(*(ACCOUNT_IN*)(current_request->arg1));
        break;
    case RPC_LOOKUP_ACCOUNT_POLL:
        retval = (m_pDoc->rpcClient).lookup_account_poll(*(ACCOUNT_OUT*)(current_request->arg1));
        break;
    case RPC_CREATE_ACCOUNT:
        retval = (m_pDoc->rpcClient).create_account(*(ACCOUNT_IN*)(current_request->arg1));
        break;
    case RPC_CREATE_ACCOUNT_POLL:
        retval = (m_pDoc->rpcClient).create_account_poll(*(ACCOUNT_OUT*)(current_request->arg1));
        break;
    case RPC_PROJECT_ATTACH:
        retval = (m_pDoc->rpcClient).project_attach(
            (const char*)(current_request->arg1),
            (const char*)(current_request->arg2),
            (const char*)(current_request->arg3)
        );
        break;
    case RPC_PROJECT_ATTACH_FROM_FILE:
        retval = (m_pDoc->rpcClient).project_attach_from_file();
        break;
    case RPC_PROJECT_ATTACH_POLL:
        retval = (m_pDoc->rpcClient).project_attach_poll(*(PROJECT_ATTACH_REPLY*)(current_request->arg1));
        break;
    case RPC_ACCT_MGR_RPC:
        // arg4 is used as a boolean flag: non-NULL means "use config file".
        retval = (m_pDoc->rpcClient).acct_mgr_rpc(
            (const char*)(current_request->arg1),
            (const char*)(current_request->arg2),
            (const char*)(current_request->arg3),
            (bool)(current_request->arg4 != NULL)
        );
        break;
    case RPC_ACCT_MGR_RPC_POLL:
        retval = (m_pDoc->rpcClient).acct_mgr_rpc_poll(*(ACCT_MGR_RPC_REPLY*)(current_request->arg1));
        break;
    case RPC_GET_NEWER_VERSION:
        retval = (m_pDoc->rpcClient).get_newer_version(
            *(std::string*)(current_request->arg1),
            *(std::string*)(current_request->arg2)
        );
        break;
    case RPC_READ_GLOBAL_PREFS_OVERRIDE:
        retval = (m_pDoc->rpcClient).read_global_prefs_override();
        break;
    case RPC_READ_CC_CONFIG:
        retval = (m_pDoc->rpcClient).read_cc_config();
        break;
    case RPC_GET_CC_STATUS:
        retval = (m_pDoc->rpcClient).get_cc_status(*(CC_STATUS*)(current_request->arg1));
        break;
    case RPC_GET_GLOBAL_PREFS_FILE:
        retval = (m_pDoc->rpcClient).get_global_prefs_file(*(std::string*)(current_request->arg1));
        break;
    case RPC_GET_GLOBAL_PREFS_WORKING:
        retval = (m_pDoc->rpcClient).get_global_prefs_working(*(std::string*)(current_request->arg1));
        break;
    case RPC_GET_GLOBAL_PREFS_WORKING_STRUCT:
        retval = (m_pDoc->rpcClient).get_global_prefs_working_struct(
            *(GLOBAL_PREFS*)(current_request->arg1),
            *(GLOBAL_PREFS_MASK*)(current_request->arg2)
        );
        break;
    case RPC_GET_GLOBAL_PREFS_OVERRIDE:
        retval = (m_pDoc->rpcClient).get_global_prefs_override(*(std::string*)(current_request->arg1));
        break;
    case RPC_SET_GLOBAL_PREFS_OVERRIDE:
        retval = (m_pDoc->rpcClient).set_global_prefs_override(*(std::string*)(current_request->arg1));
        break;
    case RPC_GET_GLOBAL_PREFS_OVERRIDE_STRUCT:
        retval = (m_pDoc->rpcClient).get_global_prefs_override_struct(
            *(GLOBAL_PREFS*)(current_request->arg1),
            *(GLOBAL_PREFS_MASK*)(current_request->arg2)
        );
        break;
    case RPC_SET_GLOBAL_PREFS_OVERRIDE_STRUCT:
        retval = (m_pDoc->rpcClient).set_global_prefs_override_struct(
            *(GLOBAL_PREFS*)(current_request->arg1),
            *(GLOBAL_PREFS_MASK*)(current_request->arg2)
        );
        break;
    case RPC_GET_CC_CONFIG:
        retval = (m_pDoc->rpcClient).get_cc_config(
            *(CC_CONFIG*)(current_request->arg1),
            *(LOG_FLAGS*)(current_request->arg2)
        );
        break;
    case RPC_SET_CC_CONFIG:
        retval = (m_pDoc->rpcClient).set_cc_config(
            *(CC_CONFIG*)(current_request->arg1),
            *(LOG_FLAGS*)(current_request->arg2)
        );
        break;
    case RPC_SET_LANGUAGE:
        retval = (m_pDoc->rpcClient).set_language(
            (const char*)(current_request->arg1)
        );
        break;
    default:
        // Unknown selector: leave retval at 0 (treated as success).
        break;
    }

    return retval;
}
468
469
470 // TODO: combine RPC requests for different buffers, then just copy the buffer.
471
// Queue an RPC request for execution on the RPC thread.  Runs on the
// main (GUI) thread.
//
// Async requests return immediately after queuing.  For
// RPC_TYPE_WAIT_FOR_COMPLETION (demand) requests, this blocks until the
// RPC finishes, showing an AsyncRPCDlg "Please Wait" dialog if it takes
// longer than RPC_WAIT_DLG_DELAY ms while the Manager window is visible.
// If the user cancels, the connection is dropped and the RPC thread's
// in-flight work is abandoned (see the cancel branch below).
//
// hasPriority inserts the request at the head of the queue instead of
// the tail.  Returns 0 on success, the RPC's result code for completed
// demand requests, or -1 on error/cancel.
int CMainDocument::RequestRPC(ASYNC_RPC_REQUEST& request, bool hasPriority) {
    std::vector<ASYNC_RPC_REQUEST>::iterator iter;
    int retval = 0;
    int response = wxID_OK;
    wxMutexError mutexErr = wxMUTEX_NO_ERROR;
    long delayTimeRemaining, timeToSleep;
    bool shown = false;

    if (!m_RPCThread) return -1;

    if ( (request.rpcType < RPC_TYPE_WAIT_FOR_COMPLETION) ||
            (request.rpcType >= NUM_RPC_TYPES) ) {
        wxASSERT(false);
        return -1;
    }

    // If we are quitting, cancel any pending RPCs
    if (request.which_rpc == RPC_QUIT) {
        if (current_rpc_request.isActive) {
            // Keep the queue entry for the in-flight request; drop the rest.
            RPC_requests.erase(RPC_requests.begin()+1, RPC_requests.end());

        } else {
            RPC_requests.clear();
        }
    }

    // Check if a duplicate request is already on the queue
    for (iter=RPC_requests.begin(); iter!=RPC_requests.end(); ++iter) {
        if (iter->isSameAs(request)) {
            return 0;
        }
    }

    // Demand requests report their result through resultPtr; default it
    // to our local retval so the result is visible when we return.
    if ((request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) && (request.resultPtr == NULL)) {
        request.resultPtr = &retval;
    }

    if (hasPriority) {
        // We may want to set hasPriority for some user-initiated events.
        // Since the user is waiting, insert this at head of request queue.
        // As of 8/14/08, hasPriority is never set true, so hasn't been tested.
        iter = RPC_requests.insert(RPC_requests.begin(), request);
    } else {
           RPC_requests.push_back(request);
    }

    // Start this RPC if no other RPC is already in progress.
    if (RPC_requests.size() == 1) {
        // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait()
        mutexErr = m_pRPC_Thread_Mutex->Lock();  // Blocks until thread unlocks the mutex
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

        // Make sure activation is an atomic operation
        request.isActive = false;
        current_rpc_request = request;
        current_rpc_request.isActive = true;

        m_pRPC_Thread_Condition->Signal();  // Unblock the thread

        // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(),
        // causing it to block again if we still have our lock on the mutex.
        mutexErr = m_pRPC_Thread_Mutex->Unlock();
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
    }

    // If this is a user-initiated event wait for completion but show
    // a dialog allowing the user to cancel.
    if (request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) {
    // TODO: proper handling if a second user request is received while first is pending ??
        if (m_bWaitingForRPC) {
            wxLogMessage(wxT("Second user RPC request while another was pending"));
            wxASSERT(false);
            return -1;
        }
        // Don't show dialog if RPC completes before RPC_WAIT_DLG_DELAY
        // or while BOINC is minimized
        CBOINCBaseFrame* pFrame = wxGetApp().GetFrame();
        wxStopWatch Dlgdelay = wxStopWatch();
        m_RPCWaitDlg = new AsyncRPCDlg();
        m_bWaitingForRPC = true;

        // Allow RPC_WAIT_DLG_DELAY seconds for Demand RPC to complete before
        // displaying "Please Wait" dialog, but keep checking for completion.
        delayTimeRemaining = RPC_WAIT_DLG_DELAY;
        while (true) {
            if (delayTimeRemaining >= 0) {  // Prevent overflow if minimized for a very long time
                delayTimeRemaining = RPC_WAIT_DLG_DELAY - Dlgdelay.Time();
            }

            if (pFrame) {
                shown = pFrame->IsShown();
            } else {
                shown = false;
            }

            if (shown) {
                if (delayTimeRemaining <= 0) break; // Display the Please Wait dialog
                timeToSleep = delayTimeRemaining;
            } else {
                // Don't show dialog while Manager is minimized, but do
                // process events so user can maximize the manager.
                //
                // NOTE: CBOINCGUIApp::FilterEvent() discards those events
                // which might cause posting of more RPC requests while
                // we are in this loop, to prevent undesirable recursion.
                // Since the manager is minimized, we don't have to worry about
                // discarding crucial drawing or command events.
                // The filter does allow the Open Manager menu item from
                // the system tray icon and wxEVT_RPC_FINISHED event.
                //
                timeToSleep = DELAY_WHEN_MINIMIZED; // Allow user to maximize Manager
                wxSafeYield(NULL, true);
            }

            // OnRPCComplete() clears m_bWaitingForRPC if RPC completed
            if (! m_bWaitingForRPC) {
                return retval;
            }

            mutexErr = m_pRPC_Request_Mutex->Lock();
            wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

            // Simulate handling of CRPCFinishedEvent but don't allow any other
            // events (so no user activity) to prevent undesirable recursion.
            // Since we don't need to filter and discard events, they remain on
            // the queue until it is safe to process them.
            // Allow RPC thread to run while we wait for it.
            if (!current_rpc_request.isActive) {
                mutexErr = m_pRPC_Request_Mutex->Unlock();
                wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

                HandleCompletedRPC();
                continue;
            }

            // Wait for RPC thread to wake us
            // This does the following:
            // (1) Unlocks the Mutex and puts the main thread to sleep as an atomic operation.
            // (2) On Signal from RPC thread: locks Mutex again and wakes the main thread.
            m_pRPC_Request_Condition->WaitTimeout(timeToSleep);

            mutexErr = m_pRPC_Request_Mutex->Unlock();
            wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
        }

        // Demand RPC has taken longer than RPC_WAIT_DLG_DELAY seconds and
        // Manager is not minimized, so display the "Please Wait" dialog
        // with a Cancel button.  If the RPC does complete while the dialog
        // is up, HandleCompletedRPC() will call EndModal with wxID_OK.
        //
        // NOTE: the Modal dialog permits processing of all events, but
        // CBOINCGUIApp::FilterEvent() blocks those events which might cause
        // posting of more RPC requests while in this dialog, to prevent
        // undesirable recursion.
        //
        if (m_RPCWaitDlg) {
            response = m_RPCWaitDlg->ShowModal();
            // Remember time the dialog was closed for use by RunPeriodicRPCs()
            m_dtLasAsyncRPCDlgTime = wxDateTime::Now();
            if (response != wxID_OK) {
                // TODO: If user presses Cancel in Please Wait dialog but request
                // has not yet been started, should we just remove it from queue?
                // If we make that change, should we also add a separate menu item
                // to reset the RPC connection (or does one already exist)?

                retval = -1;
                // If the RPC continues to get data after we return to
                // our caller, it may try to write into a buffer or struct
                // which the caller has already deleted.  To prevent this,
                // we close the socket (disconnect) and kill the RPC thread.
                // This is ugly but necessary.  We must then reconnect and
                // start a new RPC thread.
                if (current_rpc_request.isActive) {
                    current_rpc_request.isActive = false;
                    rpcClient.close();
                    RPC_requests.clear();
                    current_rpc_request.clear();
                    m_bNeedRefresh = false;
                    m_bNeedTaskBarRefresh = false;

                    // We will be reconnected to the same client (if possible) by
                    // CBOINCDialUpManager::OnPoll() and CNetworkConnection::Poll().
                    m_pNetworkConnection->SetStateDisconnected();
                }
                if (response == wxID_EXIT) {
                    // User chose to exit the Manager from the wait dialog.
                    pFrame = wxGetApp().GetFrame();
                    wxCommandEvent evt(wxEVT_COMMAND_MENU_SELECTED, wxID_EXIT);
                    s_bSkipExitConfirmation = true;
                    pFrame->GetEventHandler()->AddPendingEvent(evt);
                }
            }
            if (m_RPCWaitDlg) {
                m_RPCWaitDlg->Destroy();
            }
            m_RPCWaitDlg = NULL;
            m_bWaitingForRPC = false;
        }
    }
    return retval;
}
672
673
// Shut down the RPC worker thread.  First requests a graceful exit by
// setting m_bShutDownRPCThread and signaling the thread's condition; if
// the thread has not exited within RPC_KILL_DELAY ms, forcefully Kill()s
// it.  Also aborts any in-flight RPC by closing the client socket.
void CMainDocument::KillRPCThread() {
    wxMutexError mutexErr = wxMUTEX_NO_ERROR;
    int i;

    if (!m_RPCThread) {
        return;
    }

    m_bNeedRefresh = false;
    m_bNeedTaskBarRefresh = false;

    rpcClient.close();  // Abort any async RPC in progress (in case hung)

    // On some platforms, Delete() takes effect only when thread calls TestDestroy()
    // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait()
    mutexErr = m_pRPC_Thread_Mutex->Lock();  // Blocks until thread unlocks the mutex
    wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

    m_bShutDownRPCThread = true;
    m_pRPC_Thread_Condition->Signal();  // Unblock the thread

    mutexErr = m_pRPC_Thread_Mutex->Unlock();   // Release the mutex so thread can lock it
    wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

    RPC_requests.clear();
    current_rpc_request.clear();

    // Wait up to RPC_KILL_DELAY milliseconds for thread to exit on its own
    for (i=0; i< RPC_KILL_DELAY; ++i) {
        boinc_sleep(.001);  // Defer to RPC thread for 1 millisecond
        if (!m_RPCThread) {
            return; // RPC thread sets m_RPCThread to NULL when it exits
        }
    }
    // Thread failed to exit, so forcefully kill it
    m_RPCThread->Kill();
}
711
712
// Handler for the wxEVT_RPC_FINISHED event posted by RPCThread::Entry();
// delegates all work to HandleCompletedRPC().
void CMainDocument::OnRPCComplete(CRPCFinishedEvent&) {
    HandleCompletedRPC();
}
716
717
HandleCompletedRPC()718 void CMainDocument::HandleCompletedRPC() {
719 int retval = 0;
720 wxMutexError mutexErr = wxMUTEX_NO_ERROR;
721 int i, n, requestIndex = -1;
722 bool stillWaitingForPendingRequests = false;
723
724 if (!m_RPCThread) return;
725
726 if (current_rpc_request.isActive) return;
727
728 // We can get here either via a CRPCFinishedEvent event posted
729 // by the RPC thread or by a call from RequestRPC. If we were
730 // called from RequestRPC, the CRPCFinishedEvent will still be
731 // on the event queue, so we get called twice. Check for this here.
732 if (current_rpc_request.which_rpc == 0) return; // already handled by a call from RequestRPC
733
734 // Find our completed request in the queue
735 n = (int) RPC_requests.size();
736 for (i=0; i<n; ++i) {
737 if (RPC_requests[i].isSameAs(current_rpc_request)) {
738 requestIndex = i;
739 } else {
740 if (RPC_requests[i].rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) {
741 stillWaitingForPendingRequests = true;
742 }
743 }
744 }
745
746 if (! stillWaitingForPendingRequests) {
747 if (m_RPCWaitDlg) {
748 if (m_RPCWaitDlg->IsShown()) {
749 m_RPCWaitDlg->EndModal(wxID_OK);
750 }
751 m_RPCWaitDlg->Destroy();
752 m_RPCWaitDlg = NULL;
753 }
754 m_bWaitingForRPC = false;
755 }
756
757 if (requestIndex >= 0) {
758 // Remove completed request from the queue
759 RPC_requests.erase(RPC_requests.begin()+requestIndex);
760 }
761
762 retval = current_rpc_request.retval;
763
764
765 if (current_rpc_request.completionTime) {
766 *(current_rpc_request.completionTime) = wxDateTime::Now();
767 }
768
769 if (current_rpc_request.resultPtr) {
770 *(current_rpc_request.resultPtr) = retval;
771 }
772
773 // Post-processing
774 if (! retval) {
775 if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_AFTER) {
776 if (!retval) {
777 m_bNeedRefresh = true;
778 }
779 }
780
781 if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_UPDATE_TASKBAR_ICON_AFTER) {
782 if (!retval) {
783 m_bNeedTaskBarRefresh = true;
784 }
785 }
786
787 switch (current_rpc_request.which_rpc) {
788 case RPC_GET_STATE:
789 if (current_rpc_request.exchangeBuf && !retval) {
790 CC_STATE* arg1 = (CC_STATE*)current_rpc_request.arg1;
791 CC_STATE* exchangeBuf = (CC_STATE*)current_rpc_request.exchangeBuf;
792 arg1->projects.swap(exchangeBuf->projects);
793 arg1->apps.swap(exchangeBuf->apps);
794 arg1->app_versions.swap(exchangeBuf->app_versions);
795 arg1->wus.swap(exchangeBuf->wus);
796 arg1->results.swap(exchangeBuf->results);
797 exchangeBuf->global_prefs = arg1->global_prefs;
798 exchangeBuf->version_info = arg1->version_info;
799 exchangeBuf->executing_as_daemon = arg1->executing_as_daemon;
800 exchangeBuf->host_info = arg1->host_info;
801 exchangeBuf->time_stats = arg1->time_stats;
802 exchangeBuf->have_nvidia = arg1->have_nvidia;
803 exchangeBuf->have_ati = arg1->have_ati;
804 }
805 break;
806 case RPC_GET_RESULTS:
807 if (current_rpc_request.exchangeBuf && !retval) {
808 RESULTS* arg1 = (RESULTS*)current_rpc_request.arg1;
809 RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf;
810 arg1->results.swap(exchangeBuf->results);
811 }
812 break;
813 case RPC_GET_FILE_TRANSFERS:
814 if (current_rpc_request.exchangeBuf && !retval) {
815 FILE_TRANSFERS* arg1 = (FILE_TRANSFERS*)current_rpc_request.arg1;
816 FILE_TRANSFERS* exchangeBuf = (FILE_TRANSFERS*)current_rpc_request.exchangeBuf;
817 arg1->file_transfers.swap(exchangeBuf->file_transfers);
818 }
819 break;
820 case RPC_GET_SIMPLE_GUI_INFO2:
821 if (!retval) {
822 retval = CopyProjectsToStateBuffer(*(PROJECTS*)(current_rpc_request.arg1), *(CC_STATE*)(current_rpc_request.arg2));
823 }
824 if (current_rpc_request.exchangeBuf && !retval) {
825 RESULTS* arg3 = (RESULTS*)current_rpc_request.arg3;
826 RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf;
827 arg3->results.swap(exchangeBuf->results);
828 }
829 break;
830 case RPC_GET_PROJECT_STATUS1:
831 if (!retval) {
832 retval = CopyProjectsToStateBuffer(*(PROJECTS*)(current_rpc_request.arg1), *(CC_STATE*)(current_rpc_request.arg2));
833 }
834 break;
835 case RPC_GET_ALL_PROJECTS_LIST:
836 if (current_rpc_request.exchangeBuf && !retval) {
837 ALL_PROJECTS_LIST* arg1 = (ALL_PROJECTS_LIST*)current_rpc_request.arg1;
838 ALL_PROJECTS_LIST* exchangeBuf = (ALL_PROJECTS_LIST*)current_rpc_request.exchangeBuf;
839 arg1->projects.swap(exchangeBuf->projects);
840 }
841 break;
842 case RPC_GET_DISK_USAGE:
843 if (current_rpc_request.exchangeBuf && !retval) {
844 DISK_USAGE* arg1 = (DISK_USAGE*)current_rpc_request.arg1;
845 DISK_USAGE* exchangeBuf = (DISK_USAGE*)current_rpc_request.exchangeBuf;
846 arg1->projects.swap(exchangeBuf->projects);
847 exchangeBuf->d_total = arg1->d_total;
848 exchangeBuf->d_free = arg1->d_free;
849 exchangeBuf->d_boinc = arg1->d_boinc;
850 exchangeBuf->d_allowed = arg1->d_allowed;
851 }
852 break;
853 case RPC_GET_NOTICES:
854 if (current_rpc_request.exchangeBuf && !retval) {
855 NOTICES* arg2 = (NOTICES*)current_rpc_request.arg2;
856 NOTICES* exchangeBuf = (NOTICES*)current_rpc_request.exchangeBuf;
857 arg2->notices.swap(exchangeBuf->notices);
858 }
859 if (!retval) {
860 CachedNoticeUpdate(); // Call this only when notice buffer is stable
861 }
862 m_bWaitingForGetNoticesRPC = false;
863 break;
864 case RPC_GET_MESSAGES:
865 if (current_rpc_request.exchangeBuf && !retval) {
866 MESSAGES* arg2 = (MESSAGES*)current_rpc_request.arg2;
867 MESSAGES* exchangeBuf = (MESSAGES*)current_rpc_request.exchangeBuf;
868 arg2->messages.swap(exchangeBuf->messages);
869 }
870 if (!retval) {
871 CachedMessageUpdate(); // Call this only when message buffer is stable
872 }
873 break;
874 case RPC_GET_HOST_INFO:
875 if (current_rpc_request.exchangeBuf && !retval) {
876 HOST_INFO* arg1 = (HOST_INFO*)current_rpc_request.arg1;
877 HOST_INFO* exchangeBuf = (HOST_INFO*)current_rpc_request.exchangeBuf;
878 *exchangeBuf = *arg1;
879 }
880 break;
881 case RPC_GET_STATISTICS:
882 if (current_rpc_request.exchangeBuf && !retval) {
883 PROJECTS* arg1 = (PROJECTS*)current_rpc_request.arg1;
884 PROJECTS* exchangeBuf = (PROJECTS*)current_rpc_request.exchangeBuf;
885 arg1->projects.swap(exchangeBuf->projects);
886 }
887 break;
888
889 case RPC_GET_CC_STATUS:
890 if (current_rpc_request.exchangeBuf && !retval) {
891 CC_STATUS* arg1 = (CC_STATUS*)current_rpc_request.arg1;
892 CC_STATUS* exchangeBuf = (CC_STATUS*)current_rpc_request.exchangeBuf;
893 *exchangeBuf = *arg1;
894 }
895 break;
896 case RPC_ACCT_MGR_INFO:
897 if (current_rpc_request.exchangeBuf && !retval) {
898 ACCT_MGR_INFO* arg1 = (ACCT_MGR_INFO*)current_rpc_request.arg1;
899 ACCT_MGR_INFO* exchangeBuf = (ACCT_MGR_INFO*)current_rpc_request.exchangeBuf;
900 *exchangeBuf = *arg1;
901 }
902 break;
903 default:
904 // We don't support double buffering for other RPC calls
905 wxASSERT(current_rpc_request.exchangeBuf == NULL);
906 break;
907 }
908 }
909
910 if (current_rpc_request.resultPtr) {
911 // In case post-processing changed retval
912 *(current_rpc_request.resultPtr) = retval;
913 }
914
915 // We must call ProcessEvent() rather than AddPendingEvent() here to
916 // guarantee integrity of data when other events are handled (such as
917 // Abort, Suspend/Resume, Show Graphics, Update, Detach, Reset, No
918 // New Work, etc.) Otherwise, if one of those events is pending it
919 // might be processed first, and the data in the selected rows may not
920 // match the data which the user selected if any rows were added or
921 // deleted due to the RPC.
922 // The refresh event called here adjusts the selections to fix any
923 // such mismatch before other pending events are processed.
924 //
925 // However, the refresh code may itself request a Demand RPC, which
926 // would cause undesirable recursion if we are already waiting for
927 // another Demand RPC to complete. In that case, we defer the refresh
928 // until all pending Demand RPCs have been done.
929 //
930 if (m_bNeedRefresh && !m_bWaitingForRPC) {
931 m_bNeedRefresh = false;
932 // We must get the frame immediately before using it,
933 // since it may have been changed by SetActiveGUI().
934 CBOINCBaseFrame* pFrame = wxGetApp().GetFrame();
935 if (pFrame) {
936 CFrameEvent event(wxEVT_FRAME_REFRESHVIEW, pFrame);
937 pFrame->GetEventHandler()->ProcessEvent(event);
938 }
939 }
940
941 if (m_bNeedTaskBarRefresh && !m_bWaitingForRPC) {
942 m_bNeedTaskBarRefresh = false;
943 CTaskBarIcon* pTaskbar = wxGetApp().GetTaskBarIcon();
944 if (pTaskbar) {
945 CTaskbarEvent event(wxEVT_TASKBAR_REFRESH, pTaskbar);
946 pTaskbar->ProcessEvent(event);
947 }
948 }
949
950 if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_EVENT_LOG_AFTER) {
951 CDlgEventLog* eventLog = wxGetApp().GetEventLog();
952 if (eventLog) {
953 eventLog->OnRefresh();
954 }
955 }
956
957 current_rpc_request.clear();
958
959 // Start the next RPC request.
960 // We can't start this until finished processing the previous RPC's
961 // event because the two requests may write into the same buffer.
962 if (RPC_requests.size() > 0) {
963 // Wait for thread to unlock mutex with m_pRPC_Thread_Condition->Wait()
964 mutexErr = m_pRPC_Thread_Mutex->Lock(); // Blocks until thread unlocks the mutex
965 wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
966
967 // Make sure activation is an atomic operation
968 RPC_requests[0].isActive = false;
969 current_rpc_request = RPC_requests[0];
970 current_rpc_request.isActive = true;
971
972 m_pRPC_Thread_Condition->Signal(); // Unblock the thread
973
974 // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(),
975 // causing it to block again if we still have our lock on the mutex.
976 mutexErr = m_pRPC_Thread_Mutex->Unlock();
977 wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
978 }
979 }
980
981
CopyProjectsToStateBuffer(PROJECTS & p,CC_STATE & ccstate)982 int CMainDocument::CopyProjectsToStateBuffer(PROJECTS& p, CC_STATE& ccstate) {
983 int retval = 0;
984 unsigned int i;
985 PROJECT* state_project = NULL;
986
987 // flag for delete
988 for (i=0; i<ccstate.projects.size(); i++) {
989 state_project = ccstate.projects[i];
990 state_project->flag_for_delete = true;
991 }
992
993 for (i=0; i<p.projects.size(); i++) {
994 state_project = ccstate.lookup_project(p.projects[i]->master_url);
995 if (state_project && (!strcmp(p.projects[i]->master_url, state_project->master_url))) {
996 // Because the CC_STATE contains several pointers to each element of the
997 // CC_STATE::projects vector, we must update these elements in place.
998 *state_project = *(p.projects[i]);
999 state_project->flag_for_delete = false;
1000 } else {
1001 retval = ERR_NOT_FOUND;
1002 }
1003 continue;
1004 }
1005
1006 // Anything need to be deleted?
1007 if (!retval) {
1008 for (i=0; i<ccstate.projects.size(); i++) {
1009 state_project = ccstate.projects[i];
1010 if (state_project->flag_for_delete) {
1011 retval = ERR_FILE_MISSING;
1012 }
1013 }
1014 }
1015
1016 return retval;
1017 }
1018
1019
// Event table: route clicks on the dialog's wxID_EXIT button to
// AsyncRPCDlg::OnExit.
BEGIN_EVENT_TABLE(AsyncRPCDlg, wxDialog)
    EVT_BUTTON(wxID_EXIT, AsyncRPCDlg::OnExit)
END_EVENT_TABLE()

// Enable wxWidgets run-time class information (wxClassInfo) for AsyncRPCDlg.
IMPLEMENT_CLASS(AsyncRPCDlg, wxDialog)
1025
1026 AsyncRPCDlg::AsyncRPCDlg() : wxDialog( NULL, wxID_ANY, wxT(""), wxDefaultPosition ) {
1027 CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
1028 wxString exit_label;
1029 wxASSERT(pSkinAdvanced);
1030
1031 wxString message = wxString(_("Communicating with BOINC client. Please wait ..."));
1032
1033 #ifdef __WXMAC__
1034 exit_label.Printf(_("&Quit %s"), pSkinAdvanced->GetApplicationName().c_str());
1035 #else
1036 exit_label.Printf(_("E&xit %s"), pSkinAdvanced->GetApplicationName().c_str());
1037 #endif
1038
1039 wxString strCaption;
1040 strCaption.Printf(_("%s - Communication"), pSkinAdvanced->GetApplicationName().c_str());
1041 SetTitle(strCaption.c_str());
1042
1043 wxBoxSizer *topsizer = new wxBoxSizer( wxVERTICAL );
1044 wxBoxSizer *icon_text = new wxBoxSizer( wxHORIZONTAL );
1045
1046 icon_text->Add( CreateTextSizer( message ), 0, wxALIGN_CENTER | wxLEFT, 10 );
1047 topsizer->Add( icon_text, 1, wxCENTER | wxLEFT|wxRIGHT|wxTOP, 10 );
1048
1049 wxStdDialogButtonSizer *sizerBtn = CreateStdDialogButtonSizer(0);
1050
1051 wxButton* exitbutton = new wxButton;
1052 exitbutton->Create( this, wxID_EXIT, exit_label, wxDefaultPosition, wxDefaultSize, 0 );
1053 sizerBtn->Add(exitbutton, 0, wxLEFT|wxRIGHT|wxALL, 5);
1054
1055 wxButton* cancelbutton = new wxButton;
1056 cancelbutton->Create( this, wxID_CANCEL, _("Cancel"), wxDefaultPosition, wxDefaultSize, 0 );
1057 sizerBtn->Add(cancelbutton, 0, wxLEFT|wxRIGHT|wxALL, 5);
1058
1059 if ( sizerBtn )
1060 topsizer->Add(sizerBtn, 0, wxEXPAND | wxALL, 10 );
1061
1062 SetAutoLayout( true );
1063 SetSizer( topsizer );
1064
1065 topsizer->SetSizeHints( this );
1066 topsizer->Fit( this );
1067 wxSize size( GetSize() );
1068 if (size.x < size.y*3/2)
1069 {
1070 size.x = size.y*3/2;
1071 SetSize( size );
1072 }
1073
1074 Centre( wxBOTH | wxCENTER_FRAME);
1075 }
1076
1077
// Handler for the dialog's Exit/Quit button: end the modal dialog with
// return code wxID_EXIT so the caller knows the user chose to exit the
// Manager rather than merely cancel the wait.
void AsyncRPCDlg::OnExit(wxCommandEvent& WXUNUSED(eventUnused)) {
    EndModal(wxID_EXIT);
}
1081
1082
#if 0

/// For testing: triggered by Advanced / Options menu item.
/// Disabled (compiled out) test harness: issues a RPC_GET_ALL_PROJECTS_LIST
/// demand RPC with RPC_TYPE_WAIT_FOR_COMPLETION and logs both the
/// RequestRPC() return value and the RPC's own result code (rpc_result,
/// written through request.resultPtr when the RPC completes).
void CMainDocument::TestAsyncRPC() {
    ALL_PROJECTS_LIST pl;
    ASYNC_RPC_REQUEST request;
    wxDateTime completionTime = wxDateTime((time_t)0);
    int req_retval = 0, rpc_result = 0;

    completionTime.ResetTime();

    request.which_rpc = RPC_GET_ALL_PROJECTS_LIST;
    request.arg1 = &pl;
    request.exchangeBuf = NULL;
    request.arg2 = NULL;
    request.arg3 = NULL;
    request.arg4 = NULL;
    request.rpcType = RPC_TYPE_WAIT_FOR_COMPLETION;
    request.completionTime = &completionTime;
//    request.result = NULL;
    request.resultPtr = &rpc_result;        // For testing async RPCs
    request.isActive = false;

//    retval = rpcClient.get_all_projects_list(pl);

    req_retval = RequestRPC(request, true);

    // NOTE(review): FormatTime() is called with no argument, so the logged
    // "Completion time" presumably reflects the current time rather than
    // completionTime -- confirm if this dead code is ever revived.
    wxString s = FormatTime();
    wxLogMessage(wxT("Completion time = %s"), s.c_str());
    wxLogMessage(wxT("RequestRPC returned %d\n"), req_retval);
    ::wxSafeYield(NULL, true);  // Allow processing of RPC_FINISHED event
    wxLogMessage(wxT("rpcClient.get_all_projects_list returned %d\n"), rpc_result);
}

#endif
1118