package systests

import (
	"context"
	"fmt"
	"io"
	"testing"
	"time"

	client "github.com/keybase/client/go/client"
	engine "github.com/keybase/client/go/engine"
	libkb "github.com/keybase/client/go/libkb"
	logger "github.com/keybase/client/go/logger"
	chat1 "github.com/keybase/client/go/protocol/chat1"
	keybase1 "github.com/keybase/client/go/protocol/keybase1"
	service "github.com/keybase/client/go/service"
	teams "github.com/keybase/client/go/teams"
	clockwork "github.com/keybase/clockwork"
	rpc "github.com/keybase/go-framed-msgpack-rpc/rpc"
	"github.com/stretchr/testify/require"
	contextOld "golang.org/x/net/context"
)

// Tests for systests with multiuser, multidevice situations.
// The abbreviation "smu" means "Systests Multi User". So an
// smuUser is a "Systests Multi User User".

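// The sketch below is illustrative only (exampleSMUFlow is an assumed name,
// not used by any test in this file); it shows how the smu helpers defined
// here are typically composed: build a context, install and sign up users,
// then exercise teams and chat.
func exampleSMUFlow(t *testing.T) {
	ctx := newSMUContext(t)
	defer ctx.cleanup()

	// Each CLI-style command consumes one cloned environment, so ask for
	// enough clones up front.
	ann := ctx.installKeybaseForUser("ann", 10)
	ann.signup()
	bob := ctx.installKeybaseForUser("bob", 10)
	bob.signup()

	// ann creates a team with bob as a writer, sends a message, and bob
	// reads it back.
	team := ann.createTeam([]*smuUser{bob})
	ann.sendChat(team, "0")
	bob.readChats(team, 1)
}
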
type smuUser struct {
	ctx            *smuContext
	devices        []*smuDeviceWrapper
	backupKeys     []backupKey
	usernamePrefix string
	username       string
	userInfo       *signupInfo
	primary        *smuDeviceWrapper
	notifications  *teamNotifyHandler
}

type smuContext struct {
	t         *testing.T
	log       logger.Logger
	fakeClock clockwork.FakeClock
	users     map[string](*smuUser)
}

func newSMUContext(t *testing.T) *smuContext {
	ret := &smuContext{
		t:         t,
		users:     make(map[string](*smuUser)),
		fakeClock: clockwork.NewFakeClockAt(time.Now()),
	}
	return ret
}

func (smc *smuContext) cleanup() {
	for _, v := range smc.users {
		v.cleanup()
	}
}

func (u *smuUser) cleanup() {
	if u == nil {
		return
	}
	for _, d := range u.devices {
		d.tctx.Cleanup()
		if d.service != nil {
			d.service.Stop(0)
			err := d.stop()
			require.NoError(d.tctx.T, err)
		}
		for _, cl := range d.clones {
			cl.Cleanup()
		}
		for _, cl := range d.usedClones {
			cl.Cleanup()
		}
	}
}

// smuDeviceWrapper wraps a mock "device", meaning an independent running service and
// some connected clients.
type smuDeviceWrapper struct {
	ctx                *smuContext
	tctx               *libkb.TestContext
	clones, usedClones []*libkb.TestContext
	deviceKey          keybase1.PublicKey
	stopCh             chan error
	service            *service.Service
	cli                rpc.GenericClient
	xp                 rpc.Transporter
}

func (d *smuDeviceWrapper) KID() keybase1.KID {
	return d.deviceKey.KID
}

func (d *smuDeviceWrapper) startService(numClones int) {
	for i := 0; i < numClones; i++ {
		d.clones = append(d.clones, cloneContext(d.tctx))
	}
	d.stopCh = make(chan error)
	svc := service.NewService(d.tctx.G, false)
	d.service = svc
	startCh := svc.GetStartChannel()
	go func() {
		d.stopCh <- svc.Run()
	}()
	<-startCh
}

func (d *smuDeviceWrapper) stop() error {
	return <-d.stopCh
}

func (d *smuDeviceWrapper) clearUPAKCache() {
	_, err := d.tctx.G.LocalDb.Nuke()
	require.NoError(d.tctx.T, err)
	d.tctx.G.GetUPAKLoader().ClearMemory()
}

type smuTerminalUI struct{}

func (t smuTerminalUI) ErrorWriter() io.Writer                                        { return nil }
func (t smuTerminalUI) Output(string) error                                           { return nil }
func (t smuTerminalUI) OutputDesc(libkb.OutputDescriptor, string) error               { return nil }
func (t smuTerminalUI) OutputWriter() io.Writer                                       { return nil }
func (t smuTerminalUI) UnescapedOutputWriter() io.Writer                              { return nil }
func (t smuTerminalUI) Printf(fmt string, args ...interface{}) (int, error)           { return 0, nil }
func (t smuTerminalUI) PrintfUnescaped(fmt string, args ...interface{}) (int, error)  { return 0, nil }
func (t smuTerminalUI) Prompt(libkb.PromptDescriptor, string) (string, error)         { return "", nil }
func (t smuTerminalUI) PromptForConfirmation(prompt string) error                     { return nil }
func (t smuTerminalUI) PromptPassword(libkb.PromptDescriptor, string) (string, error) { return "", nil }
func (t smuTerminalUI) PromptPasswordMaybeScripted(libkb.PromptDescriptor, string) (string, error) {
	return "", nil
}
func (t smuTerminalUI) PromptYesNo(libkb.PromptDescriptor, string, libkb.PromptDefault) (bool, error) {
	return false, nil
}
func (t smuTerminalUI) Tablify(headings []string, rowfunc func() []string) {}
func (t smuTerminalUI) TerminalSize() (width int, height int)              { return }

type signupInfoSecretUI struct {
	signupInfo *signupInfo
	log        logger.Logger
}

func (s signupInfoSecretUI) GetPassphrase(p keybase1.GUIEntryArg, terminal *keybase1.SecretEntryArg) (res keybase1.GetPassphraseRes, err error) {
	if p.Type == keybase1.PassphraseType_PAPER_KEY {
		res.Passphrase = s.signupInfo.displayedPaperKey
	} else {
		res.Passphrase = s.signupInfo.passphrase
	}
	s.log.Debug("| GetPassphrase: %v -> %v", p, res)
	return res, err
}

type usernameLoginUI struct {
	username string
}

var _ libkb.LoginUI = (*usernameLoginUI)(nil)

func (s usernameLoginUI) GetEmailOrUsername(contextOld.Context, int) (string, error) {
	return s.username, nil
}
func (s usernameLoginUI) PromptRevokePaperKeys(contextOld.Context, keybase1.PromptRevokePaperKeysArg) (ret bool, err error) {
	return false, nil
}
func (s usernameLoginUI) DisplayPaperKeyPhrase(contextOld.Context, keybase1.DisplayPaperKeyPhraseArg) error {
	return nil
}
func (s usernameLoginUI) DisplayPrimaryPaperKey(contextOld.Context, keybase1.DisplayPrimaryPaperKeyArg) error {
	return nil
}
func (s usernameLoginUI) PromptResetAccount(_ context.Context, arg keybase1.PromptResetAccountArg) (keybase1.ResetPromptResponse, error) {
	return keybase1.ResetPromptResponse_NOTHING, nil
}
func (s usernameLoginUI) DisplayResetProgress(_ context.Context, arg keybase1.DisplayResetProgressArg) error {
	return nil
}
func (s usernameLoginUI) ExplainDeviceRecovery(_ context.Context, arg keybase1.ExplainDeviceRecoveryArg) error {
	return nil
}
func (s usernameLoginUI) PromptPassphraseRecovery(_ context.Context, arg keybase1.PromptPassphraseRecoveryArg) (bool, error) {
	return false, nil
}
func (s usernameLoginUI) ChooseDeviceToRecoverWith(_ context.Context, arg keybase1.ChooseDeviceToRecoverWithArg) (keybase1.DeviceID, error) {
	return "", nil
}
func (s usernameLoginUI) DisplayResetMessage(_ context.Context, arg keybase1.DisplayResetMessageArg) error {
	return nil
}

// popClone takes the next unused cloned environment for this device, marks it
// as used, and wires up a generic UI. Each CLI-style command run in these
// tests consumes one clone, so enough clones must be requested up front via
// startService.
func (d *smuDeviceWrapper) popClone() *libkb.TestContext {
	if len(d.clones) == 0 {
		panic("ran out of cloned environments")
	}
	ret := d.clones[0]
	d.clones = d.clones[1:]
	// Hold a reference to this clone for cleanup
	d.usedClones = append(d.usedClones, ret)
	ui := genericUI{
		g:          ret.G,
		TerminalUI: smuTerminalUI{},
	}
	ret.G.SetUI(&ui)
	return ret
}

func (smc *smuContext) setupDeviceHelper(u *smuUser, puk bool) *smuDeviceWrapper {
	tctx := setupTest(smc.t, u.usernamePrefix)
	tctx.Tp.DisableUpgradePerUserKey = !puk
	tctx.G.SetClock(smc.fakeClock)
	ret := &smuDeviceWrapper{ctx: smc, tctx: tctx}
	u.devices = append(u.devices, ret)
	if u.primary == nil {
		u.primary = ret
	}
	if smc.log == nil {
		smc.log = tctx.G.Log
	}
	return ret
}

func (smc *smuContext) installKeybaseForUser(usernamePrefix string, numClones int) *smuUser {
	user := &smuUser{ctx: smc, usernamePrefix: usernamePrefix}
	smc.users[usernamePrefix] = user
	smc.newDevice(user, numClones)
	return user
}

func (smc *smuContext) installKeybaseForUserNoPUK(usernamePrefix string, numClones int) *smuUser {
	user := &smuUser{ctx: smc, usernamePrefix: usernamePrefix}
	smc.users[usernamePrefix] = user
	smc.newDeviceHelper(user, numClones, false /* puk */)
	return user
}

func (smc *smuContext) newDevice(u *smuUser, numClones int) *smuDeviceWrapper {
	return smc.newDeviceHelper(u, numClones, true)
}

func (smc *smuContext) newDeviceHelper(u *smuUser, numClones int, puk bool) *smuDeviceWrapper {
	ret := smc.setupDeviceHelper(u, puk)
	ret.startService(numClones)
	ret.startClient()
	return ret
}

func (u *smuUser) primaryDevice() *smuDeviceWrapper {
	return u.primary
}

func (d *smuDeviceWrapper) userClient() keybase1.UserClient {
	return keybase1.UserClient{Cli: d.cli}
}

func (d *smuDeviceWrapper) ctlClient() keybase1.CtlClient {
	return keybase1.CtlClient{Cli: d.cli}
}

func (d *smuDeviceWrapper) rpcClient() rpc.GenericClient {
	return d.cli
}

func (d *smuDeviceWrapper) transport() rpc.Transporter {
	return d.xp
}

func (d *smuDeviceWrapper) startClient() {
	var err error
	tctx := d.popClone()
	d.cli, d.xp, err = client.GetRPCClientWithContext(tctx.G)
	if err != nil {
		d.ctx.t.Fatal(err)
	}
}

func (d *smuDeviceWrapper) loadEncryptionKIDs() (devices []keybase1.KID, backups []backupKey) {
	keyMap := make(map[keybase1.KID]keybase1.PublicKey)
	keys, err := d.userClient().LoadMyPublicKeys(context.TODO(), 0)
	if err != nil {
		d.ctx.t.Fatalf("Failed to LoadMyPublicKeys: %s", err)
	}
	for _, key := range keys {
		keyMap[key.KID] = key
	}

	for _, key := range keys {
		if key.IsSibkey {
			continue
		}
		parent, found := keyMap[keybase1.KID(key.ParentID)]
		if !found {
			continue
		}

		switch parent.DeviceType {
		case keybase1.DeviceTypeV2_PAPER:
			backups = append(backups, backupKey{KID: key.KID, deviceID: parent.DeviceID})
		case keybase1.DeviceTypeV2_DESKTOP:
			devices = append(devices, key.KID)
		default:
		}
	}
	return devices, backups
}

func (u *smuUser) signup() {
	u.signupHelper(true, false)
}

func (u *smuUser) signupNoPUK() {
	u.signupHelper(false, false)
}

func (u *smuUser) signupHelper(puk, paper bool) {
	ctx := u.ctx
	userInfo := randomUser(u.usernamePrefix)
	u.userInfo = userInfo
	dw := u.primaryDevice()
	tctx := dw.popClone()
	tctx.Tp.DisableUpgradePerUserKey = !puk
	g := tctx.G
	signupUI := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(g),
	}
	g.SetUI(&signupUI)
	signup := client.NewCmdSignupRunner(g)
	signup.SetTestWithPaper(paper)
	if err := signup.Run(); err != nil {
		ctx.t.Fatal(err)
	}
	ctx.t.Logf("signed up %s", userInfo.username)
	u.username = userInfo.username
	var backupKey backupKey
	devices, backups := dw.loadEncryptionKIDs()
	if len(devices) != 1 {
		ctx.t.Fatalf("Expected 1 device back; got %d", len(devices))
	}
	dw.deviceKey.KID = devices[0]
	if paper {
		if len(backups) != 1 {
			ctx.t.Fatalf("Expected 1 backup back; got %d", len(backups))
		}
		backupKey = backups[0]
		backupKey.secret = signupUI.info.displayedPaperKey
		u.backupKeys = append(u.backupKeys, backupKey)
	}

	// Reconfigure the config subsystem in the primary global context and in
	// all clones. This has to be done after signup because the username
	// changes, and so does the config filename.
	err := dw.tctx.G.ConfigureConfig()
	require.NoError(ctx.t, err)
	for _, clone := range dw.clones {
		err = clone.G.ConfigureConfig()
		require.NoError(ctx.t, err)
	}
}

func (u *smuUser) perUserKeyUpgrade() error {
	g := u.getPrimaryGlobalContext()
	arg := &engine.PerUserKeyUpgradeArgs{}
	eng := engine.NewPerUserKeyUpgrade(g, arg)
	uis := libkb.UIs{
		LogUI: g.UI.GetLogUI(),
	}
	m := libkb.NewMetaContextTODO(g).WithUIs(uis)
	err := engine.RunEngine2(m, eng)
	return err
}

type smuTeam struct {
	ID   keybase1.TeamID
	name string
}

type smuImplicitTeam struct {
	ID keybase1.TeamID
}

func (u *smuUser) registerForNotifications() {
	u.notifications = newTeamNotifyHandler()
	srv := rpc.NewServer(u.primaryDevice().transport(), nil)
	if err := srv.Register(keybase1.NotifyTeamProtocol(u.notifications)); err != nil {
		u.ctx.t.Fatal(err)
	}
	ncli := keybase1.NotifyCtlClient{Cli: u.primaryDevice().rpcClient()}
	if err := ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{Team: true}); err != nil {
		u.ctx.t.Fatal(err)
	}
}

func (u *smuUser) waitForNewlyAddedToTeamByID(teamID keybase1.TeamID) {
	u.ctx.t.Logf("waiting for newly added to team %s", teamID)

	// process up to 10 notifications or ~10s worth of time
	for i := 0; i < 10; i++ {
		select {
		case tid := <-u.notifications.newlyAddedToTeam:
			u.ctx.t.Logf("team newly added notification received: %v", tid)
			if tid.Eq(teamID) {
				u.ctx.t.Logf("notification matched!")
				return
			}
			u.ctx.t.Logf("ignoring newly added message (expected teamID = %q)", teamID)
		case <-time.After(1 * time.Second * libkb.CITimeMultiplier(u.getPrimaryGlobalContext())):
		}
	}
	u.ctx.t.Fatalf("timed out waiting for team newly added %s", teamID)
}
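
// Illustrative sketch (exampleWaitForTeamAdd is an assumed name, not used by
// the tests in this file) of how the notification helpers above compose:
// register the handler on the user who expects the add, perform the add from
// the other user, then block until the matching notification arrives.
func exampleWaitForTeamAdd(adder, addee *smuUser, team smuTeam) {
	addee.registerForNotifications()
	adder.addWriter(team, addee)
	addee.waitForNewlyAddedToTeamByID(team.ID)
}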

func (u *smuUser) waitForTeamAbandoned(teamID keybase1.TeamID) {
	u.ctx.t.Logf("waiting for team abandoned %s", teamID)

	// process up to 10 notifications or ~10s worth of time
	for i := 0; i < 10; i++ {
		select {
		case abandonID := <-u.notifications.abandonCh:
			u.ctx.t.Logf("team abandon notification received: %v", abandonID)
			if abandonID.Eq(teamID) {
				u.ctx.t.Logf("abandon matched!")
				return
			}
			u.ctx.t.Logf("ignoring abandon message (expected teamID = %q)", teamID)
		case <-time.After(1 * time.Second * libkb.CITimeMultiplier(u.getPrimaryGlobalContext())):
		}
	}
	u.ctx.t.Fatalf("timed out waiting for team abandon %s", teamID)
}

func (u *smuUser) getTeamsClient() keybase1.TeamsClient {
	return keybase1.TeamsClient{Cli: u.primaryDevice().rpcClient()}
}

func (u *smuUser) pollForMembershipUpdate(team smuTeam, keyGen keybase1.PerTeamKeyGeneration,
	poller func(d keybase1.TeamDetails) bool) keybase1.TeamDetails {
	wait := 100 * time.Millisecond
	var totalWait time.Duration
	i := 0
	for {
		cli := u.getTeamsClient()
		details, err := cli.TeamGet(context.TODO(), keybase1.TeamGetArg{Name: team.name})
		if err != nil {
			u.ctx.t.Fatal(err)
		}
		// If the caller specified a poller, keep polling until the predicate
		// returns true (in addition to matching the key generation).
		if details.KeyGeneration == keyGen && (poller == nil || poller(details)) {
			u.ctx.log.Debug("found key generation %d", keyGen)
			return details
		}
		if i == 9 {
			break
		}
		i++
		u.ctx.log.Debug("in pollForMembershipUpdate: iter=%d; missed it, now waiting for %s (latest details.KG = %d; poller=%v)", i, wait, details.KeyGeneration, (poller != nil))
		kickTeamRekeyd(u.getPrimaryGlobalContext(), u.ctx.t)
		time.Sleep(wait)
		totalWait += wait
		wait *= 2
	}
	require.FailNowf(u.ctx.t, "pollForMembershipUpdate timed out",
		"Failed to find the needed key generation (%d) after %s of waiting (%d iterations)",
		keyGen, totalWait, i)
	return keybase1.TeamDetails{}
}
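
// Illustrative sketch (examplePollUntilWriterGone is an assumed name, not part
// of the original suite): pollForMembershipUpdate can be driven with a
// predicate, for example to wait until a reset user no longer shows up among
// the writers once the expected key generation is reached.
func examplePollUntilWriterGone(u *smuUser, team smuTeam, gone *smuUser, keyGen keybase1.PerTeamKeyGeneration) keybase1.TeamDetails {
	return u.pollForMembershipUpdate(team, keyGen, func(d keybase1.TeamDetails) bool {
		for _, w := range d.Members.Writers {
			if w.Username == gone.username {
				return false
			}
		}
		return true
	})
}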

func (u *smuUser) pollForTeamSeqnoLink(team smuTeam, toSeqno keybase1.Seqno) {
	for i := 0; i < 20; i++ {
		details, err := teams.Load(context.TODO(), u.getPrimaryGlobalContext(), keybase1.LoadTeamArg{
			Name:        team.name,
			ForceRepoll: true,
		})
		if err != nil {
			u.ctx.t.Fatalf("error while loading team %q: %v", team.name, err)
		}

		if details.CurrentSeqno() >= toSeqno {
			u.ctx.t.Logf("Found new seqno %d at poll loop iter %d", details.CurrentSeqno(), i)
			return
		}

		time.Sleep(500 * time.Millisecond)
	}

	u.ctx.t.Fatalf("timed out waiting for team %s seqno link %d", team.name, toSeqno)
}

func (u *smuUser) createTeam(writers []*smuUser) smuTeam {
	return u.createTeam2(nil, writers, nil, nil)
}

func (u *smuUser) createTeam2(readers, writers, admins, owners []*smuUser) smuTeam {
	name := u.username + "t"
	nameK1, err := keybase1.TeamNameFromString(name)
	require.NoError(u.ctx.t, err)
	cli := u.getTeamsClient()
	x, err := cli.TeamCreate(context.TODO(), keybase1.TeamCreateArg{Name: nameK1.String()})
	require.NoError(u.ctx.t, err)
	lists := [][]*smuUser{readers, writers, admins, owners}
	roles := []keybase1.TeamRole{keybase1.TeamRole_READER,
		keybase1.TeamRole_WRITER, keybase1.TeamRole_ADMIN, keybase1.TeamRole_OWNER}
	for i, list := range lists {
		for _, u2 := range list {
			_, err = cli.TeamAddMember(context.TODO(), keybase1.TeamAddMemberArg{
				TeamID:   x.TeamID,
				Username: u2.username,
				Role:     roles[i],
			})
			require.NoError(u.ctx.t, err)
		}
	}
	return smuTeam{ID: x.TeamID, name: name}
}

func (u *smuUser) lookupImplicitTeam(create bool, displayName string, public bool) smuImplicitTeam {
	cli := u.getTeamsClient()
	var err error
	var res keybase1.LookupImplicitTeamRes
	if create {
		res, err = cli.LookupOrCreateImplicitTeam(context.TODO(), keybase1.LookupOrCreateImplicitTeamArg{Name: displayName, Public: public})
	} else {
		res, err = cli.LookupImplicitTeam(context.TODO(), keybase1.LookupImplicitTeamArg{Name: displayName, Public: public})
	}
	if err != nil {
		u.ctx.t.Fatal(err)
	}
	return smuImplicitTeam{ID: res.TeamID}
}

func (u *smuUser) loadTeam(teamname string, admin bool) *teams.Team {
	team, err := teams.Load(context.Background(), u.getPrimaryGlobalContext(), keybase1.LoadTeamArg{
		Name:        teamname,
		NeedAdmin:   admin,
		ForceRepoll: true,
	})
	require.NoError(u.ctx.t, err)
	return team
}

func (u *smuUser) addTeamMember(team smuTeam, member *smuUser, role keybase1.TeamRole) {
	cli := u.getTeamsClient()
	_, err := cli.TeamAddMember(context.TODO(), keybase1.TeamAddMemberArg{
		TeamID:   team.ID,
		Username: member.username,
		Role:     role,
	})
	require.NoError(u.ctx.t, err)
}

func (u *smuUser) addWriter(team smuTeam, w *smuUser) {
	u.addTeamMember(team, w, keybase1.TeamRole_WRITER)
}

func (u *smuUser) addAdmin(team smuTeam, w *smuUser) {
	u.addTeamMember(team, w, keybase1.TeamRole_ADMIN)
}

func (u *smuUser) editMember(team *smuTeam, username string, role keybase1.TeamRole) {
	err := u.getTeamsClient().TeamEditMember(context.TODO(), keybase1.TeamEditMemberArg{
		Name:     team.name,
		Username: username,
		Role:     role,
	})
	require.NoError(u.ctx.t, err)
}

func (u *smuUser) reAddUserAfterReset(team smuImplicitTeam, w *smuUser) {
	cli := u.getTeamsClient()
	err := cli.TeamReAddMemberAfterReset(context.TODO(), keybase1.TeamReAddMemberAfterResetArg{
		Id:       team.ID,
		Username: w.username,
	})
	require.NoError(u.ctx.t, err)
}

func (u *smuUser) reset() {
	g := u.getPrimaryGlobalContext()
	ui := genericUI{
		g:        g,
		SecretUI: u.secretUI(),
	}
	g.SetUI(&ui)
	cmd := client.NewCmdAccountResetRunner(g)
	err := cmd.Run()
	if err != nil {
		u.ctx.t.Fatal(err)
	}
}

func (u *smuUser) delete() {
	g := u.getPrimaryGlobalContext()
	ui := genericUI{
		g:          g,
		SecretUI:   u.secretUI(),
		TerminalUI: smuTerminalUI{},
	}
	g.SetUI(&ui)
	cmd := client.NewCmdAccountDeleteRunner(g)
	err := cmd.Run()
	if err != nil {
		u.ctx.t.Fatal(err)
	}
}

func (u *smuUser) dbNuke() {
	err := u.primaryDevice().ctlClient().DbNuke(context.TODO(), 0)
	require.NoError(u.ctx.t, err)
}

func (u *smuUser) userVersion() keybase1.UserVersion {
	uv, err := u.primaryDevice().userClient().MeUserVersion(context.Background(), keybase1.MeUserVersionArg{ForcePoll: true})
	require.NoError(u.ctx.t, err)
	return uv
}

func (u *smuUser) MetaContext() libkb.MetaContext {
	return libkb.NewMetaContextForTest(*u.primaryDevice().tctx)
}

func (u *smuUser) getPrimaryGlobalContext() *libkb.GlobalContext {
	return u.primaryDevice().tctx.G
}

func (u *smuUser) setUIDMapperNoCachingMode(enabled bool) {
	u.getPrimaryGlobalContext().UIDMapper.SetTestingNoCachingMode(enabled)
}

func (u *smuUser) loginAfterReset(numClones int) *smuDeviceWrapper {
	return u.loginAfterResetHelper(numClones, true)
}

func (u *smuUser) loginAfterResetNoPUK(numClones int) *smuDeviceWrapper {
	return u.loginAfterResetHelper(numClones, false)
}

func (u *smuUser) loginAfterResetHelper(numClones int, puk bool) *smuDeviceWrapper {
	dev := u.ctx.newDeviceHelper(u, numClones, puk)
	u.primary = dev
	g := dev.tctx.G
	ui := genericUI{
		g:           g,
		SecretUI:    u.secretUI(),
		LoginUI:     usernameLoginUI{u.userInfo.username},
		ProvisionUI: nullProvisionUI{randomDevice()},
	}
	g.SetUI(&ui)
	cmd := client.NewCmdLoginRunner(g)
	err := cmd.Run()
	require.NoError(u.ctx.t, err, "login after reset")
	return dev
}

func (u *smuUser) secretUI() signupInfoSecretUI {
	return signupInfoSecretUI{u.userInfo, u.ctx.log}
}

func (u *smuUser) teamGet(team smuTeam) (keybase1.TeamDetails, error) {
	cli := u.getTeamsClient()
	details, err := cli.TeamGet(context.TODO(), keybase1.TeamGetArg{Name: team.name})
	return details, err
}

func (u *smuUser) teamMemberDetails(team smuTeam, user *smuUser) ([]keybase1.TeamMemberDetails, error) {
	teamDetails, err := u.teamGet(team)
	if err != nil {
		return nil, err
	}
	var all []keybase1.TeamMemberDetails
	all = append(all, teamDetails.Members.Owners...)
	all = append(all, teamDetails.Members.Admins...)
	all = append(all, teamDetails.Members.Writers...)
	all = append(all, teamDetails.Members.Readers...)

	var matches []keybase1.TeamMemberDetails
	for _, m := range all {
		if m.Username == user.username {
			matches = append(matches, m)
		}
	}
	if len(matches) == 0 {
		return nil, libkb.NotFoundError{}
	}
	return matches, nil
}

func (u *smuUser) isMemberActive(team smuTeam, user *smuUser) (bool, error) {
	details, err := u.teamMemberDetails(team, user)
	u.ctx.t.Logf("isMemberActive team member details for %s: %+v", user.username, details)
	if err != nil {
		return false, err
	}
	for _, d := range details {
		if d.Status.IsActive() {
			return true, nil
		}
	}
	return false, nil
}

func (u *smuUser) assertMemberActive(team smuTeam, user *smuUser) {
	active, err := u.isMemberActive(team, user)
	require.NoError(u.ctx.t, err, "assertMemberActive error: %s", err)
	require.True(u.ctx.t, active, "user %s is inactive (expected active)", user.username)
}

func (u *smuUser) assertMemberInactive(team smuTeam, user *smuUser) {
	active, err := u.isMemberActive(team, user)
	require.NoError(u.ctx.t, err, "assertMemberInactive error: %s", err)
	require.False(u.ctx.t, active, "user %s is active (expected inactive)", user.username)
}

func (u *smuUser) assertMemberMissing(team smuTeam, user *smuUser) {
	_, err := u.teamMemberDetails(team, user)
	require.Error(user.ctx.t, err, "member should not be found")
	require.Equal(user.ctx.t, libkb.NotFoundError{}, err, "member should not be found")
}

func (u *smuUser) uid() keybase1.UID {
	return u.primaryDevice().tctx.G.Env.GetUID()
}

func (u *smuUser) openTeam(team smuTeam, role keybase1.TeamRole) {
	cli := u.getTeamsClient()
	err := cli.TeamSetSettings(context.Background(), keybase1.TeamSetSettingsArg{
		TeamID: team.ID,
		Settings: keybase1.TeamSettings{
			Open:   true,
			JoinAs: role,
		},
	})
	if err != nil {
		u.ctx.t.Fatal(err)
	}
}

func (u *smuUser) requestAccess(team smuTeam) {
	cli := u.getTeamsClient()
	_, err := cli.TeamRequestAccess(context.Background(), keybase1.TeamRequestAccessArg{
		Name: team.name,
	})
	if err != nil {
		u.ctx.t.Fatal(err)
	}
}

func (u *smuUser) readChatsWithError(team smuTeam) (messages []chat1.MessageUnboxed, err error) {
	return u.readChatsWithErrorAndDevice(team, u.primaryDevice(), 0)
}

// readChatsWithErrorAndDevice reads chats from `team` to get *at least*
// `nMessages` messages.
func (u *smuUser) readChatsWithErrorAndDevice(team smuTeam, dev *smuDeviceWrapper, nMessages int) (messages []chat1.MessageUnboxed, err error) {
	tctx := dev.popClone()

	pollInterval := 100 * time.Millisecond
	timeLimit := 10 * time.Second
	timeout := time.After(timeLimit)

pollLoop:
	for i := 0; ; i++ {
		runner := client.NewCmdChatReadRunner(tctx.G)
		runner.SetTeamChatForTest(team.name)
		_, messages, err = runner.Fetch()
		if err != nil {
			u.ctx.t.Logf("readChatsWithErrorAndDevice failure: %s", err.Error())
			return nil, err
		}

		if len(messages) >= nMessages {
			u.ctx.t.Logf("readChatsWithErrorAndDevice success after retrying %d times, got %d msgs, asked for %d", i, len(messages), nMessages)
			return messages, nil
		}

		u.ctx.t.Logf("readChatsWithErrorAndDevice trying again in %s (i=%d)", pollInterval, i)
		select {
		case <-timeout:
			break pollLoop
		case <-time.After(pollInterval):
		}
	}

	u.ctx.t.Logf("Failed to readChatsWithErrorAndDevice after polling for %s", timeLimit)
	return nil, fmt.Errorf("failed to read messages after polling for %s", timeLimit)
}

func (u *smuUser) readChats(team smuTeam, nMessages int) {
	u.readChatsWithDevice(team, u.primaryDevice(), nMessages)
}

func (u *smuUser) readChatsWithDevice(team smuTeam, dev *smuDeviceWrapper, nMessages int) {
	messages, err := u.readChatsWithErrorAndDevice(team, dev, nMessages)
	t := u.ctx.t
	require.NoError(t, err)

	// Filter out journeycards
	originalLen := len(messages)
	n := 0 // https://github.com/golang/go/wiki/SliceTricks#filter-in-place
	for _, msg := range messages {
		if !msg.IsJourneycard() {
			messages[n] = msg
			n++
		}
	}
	messages = messages[:n]
	if originalLen > len(messages) {
		t.Logf("filtered out %v journeycard messages", originalLen-len(messages))
	}

	if len(messages) != nMessages {
		t.Logf("messages: %v", chat1.MessageUnboxedDebugLines(messages))
	}
	require.Len(t, messages, nMessages)

	for i, msg := range messages {
		require.Equal(t, msg.Valid().MessageBody.Text().Body, fmt.Sprintf("%d", len(messages)-i-1))
	}
	divDebug(u.ctx, "readChat success for %s", u.username)
}

func (u *smuUser) sendChat(t smuTeam, msg string) {
	tctx := u.primaryDevice().popClone()
	runner := client.NewCmdChatSendRunner(tctx.G)
	runner.SetTeamChatForTest(t.name)
	runner.SetMessage(msg)
	err := runner.Run()
	if err != nil {
		u.ctx.t.Fatal(err)
	}
}