package raft

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-msgpack/codec"
)

// Return configurations optimized for in-memory testing.
func inmemConfig(t *testing.T) *Config {
	conf := DefaultConfig()
	conf.HeartbeatTimeout = 50 * time.Millisecond
	conf.ElectionTimeout = 50 * time.Millisecond
	conf.LeaderLeaseTimeout = 50 * time.Millisecond
	conf.CommitTimeout = 5 * time.Millisecond
	conf.Logger = newTestLeveledLogger(t)
	return conf
}

// MockFSM is an implementation of the FSM interface, and just stores
// the logs sequentially.
//
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockFSM struct {
	sync.Mutex
	logs           [][]byte
	configurations []Configuration
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockFSMConfigStore struct {
	FSM
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
type WrappingFSM interface {
	Underlying() FSM
}

// getMockFSM unwraps the given FSM until it finds the underlying *MockFSM,
// or returns nil if there isn't one.
func getMockFSM(fsm FSM) *MockFSM {
	switch f := fsm.(type) {
	case *MockFSM:
		return f
	case *MockFSMConfigStore:
		return f.FSM.(*MockFSM)
	case WrappingFSM:
		return getMockFSM(f.Underlying())
	}

	return nil
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockSnapshot struct {
	logs     [][]byte
	maxIndex int
}

var _ ConfigurationStore = (*MockFSMConfigStore)(nil)

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Apply(log *Log) interface{} {
	m.Lock()
	defer m.Unlock()
	m.logs = append(m.logs, log.Data)
	return len(m.logs)
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Snapshot() (FSMSnapshot, error) {
	m.Lock()
	defer m.Unlock()
	return &MockSnapshot{m.logs, len(m.logs)}, nil
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Restore(inp io.ReadCloser) error {
	m.Lock()
	defer m.Unlock()
	defer inp.Close()
	hd := codec.MsgpackHandle{}
	dec := codec.NewDecoder(inp, &hd)

	m.logs = nil
	return dec.Decode(&m.logs)
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Logs() [][]byte {
	m.Lock()
	defer m.Unlock()
	return m.logs
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSMConfigStore) StoreConfiguration(index uint64, config Configuration) {
	mm := m.FSM.(*MockFSM)
	mm.Lock()
	defer mm.Unlock()
	mm.configurations = append(mm.configurations, config)
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockSnapshot) Persist(sink SnapshotSink) error {
	hd := codec.MsgpackHandle{}
	enc := codec.NewEncoder(sink, &hd)
	if err := enc.Encode(m.logs[:m.maxIndex]); err != nil {
		sink.Cancel()
		return err
	}
	sink.Close()
	return nil
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockSnapshot) Release() {
}
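
// The helpers below are a hypothetical sketch (not part of this package's
// tests) of the FSM contract the mocks implement: apply a couple of entries,
// persist a snapshot through a throwaway in-memory SnapshotSink, and restore
// it into a fresh MockFSM. The names memorySink and snapshotRoundTrip are
// illustrative only.
type memorySink struct {
	bytes.Buffer
}

func (s *memorySink) Close() error  { return nil }
func (s *memorySink) ID() string    { return "memory" }
func (s *memorySink) Cancel() error { return nil }

func snapshotRoundTrip(t *testing.T) {
	src := &MockFSM{}
	src.Apply(&Log{Data: []byte("hello")})
	src.Apply(&Log{Data: []byte("world")})

	snap, err := src.Snapshot()
	if err != nil {
		t.Fatalf("snapshot: %v", err)
	}

	// Persist writes the msgpack-encoded logs into the sink's buffer.
	sink := &memorySink{}
	if err := snap.Persist(sink); err != nil {
		t.Fatalf("persist: %v", err)
	}
	snap.Release()

	// Restore decodes the snapshot back into a brand new FSM.
	dst := &MockFSM{}
	if err := dst.Restore(ioutil.NopCloser(&sink.Buffer)); err != nil {
		t.Fatalf("restore: %v", err)
	}
	if len(dst.Logs()) != 2 {
		t.Fatalf("expected 2 restored logs, got %d", len(dst.Logs()))
	}
}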
// testLoggerAdapter can be used as the destination for a logger, and it maps
// the writes into calls to testing.T.Log, so that you only see the logging
// for failed tests.
type testLoggerAdapter struct {
	t      *testing.T
	prefix string
}

func (a *testLoggerAdapter) Write(d []byte) (int, error) {
	if d[len(d)-1] == '\n' {
		d = d[:len(d)-1]
	}
	if a.prefix != "" {
		l := a.prefix + ": " + string(d)
		if testing.Verbose() {
			fmt.Printf("testLoggerAdapter verbose: %s\n", l)
		}
		a.t.Log(l)
		return len(l), nil
	}

	a.t.Log(string(d))
	return len(d), nil
}

func newTestLogger(t *testing.T) *log.Logger {
	return log.New(&testLoggerAdapter{t: t}, "", log.Lmicroseconds)
}

func newTestLoggerWithPrefix(t *testing.T, prefix string) *log.Logger {
	return log.New(&testLoggerAdapter{t: t, prefix: prefix}, "", log.Lmicroseconds)
}

func newTestLeveledLogger(t *testing.T) hclog.Logger {
	return hclog.New(&hclog.LoggerOptions{
		Name:   "",
		Output: &testLoggerAdapter{t: t},
	})
}

func newTestLeveledLoggerWithPrefix(t *testing.T, prefix string) hclog.Logger {
	return hclog.New(&hclog.LoggerOptions{
		Name:   prefix,
		Output: &testLoggerAdapter{t: t, prefix: prefix},
	})
}

// cluster holds the per-node stores, FSMs, snapshot stores, transports and
// Raft instances for a test cluster, plus the shared test plumbing.
type cluster struct {
	dirs             []string
	stores           []*InmemStore
	fsms             []FSM
	snaps            []*FileSnapshotStore
	trans            []LoopbackTransport
	rafts            []*Raft
	t                *testing.T
	observationCh    chan Observation
	conf             *Config
	propagateTimeout time.Duration
	longstopTimeout  time.Duration
	logger           *log.Logger
	startTime        time.Time

	failedLock sync.Mutex
	failedCh   chan struct{}
	failed     bool
}

// Merge absorbs the resources of another cluster so they are managed and
// cleaned up together.
func (c *cluster) Merge(other *cluster) {
	c.dirs = append(c.dirs, other.dirs...)
	c.stores = append(c.stores, other.stores...)
	c.fsms = append(c.fsms, other.fsms...)
	c.snaps = append(c.snaps, other.snaps...)
	c.trans = append(c.trans, other.trans...)
	c.rafts = append(c.rafts, other.rafts...)
}

// notifyFailed will close the failed channel which can signal the goroutine
// running the test that another goroutine has detected a failure in order to
// terminate the test.
func (c *cluster) notifyFailed() {
	c.failedLock.Lock()
	defer c.failedLock.Unlock()
	if !c.failed {
		c.failed = true
		close(c.failedCh)
	}
}

// Failf provides a logging function that fails the tests, prints the output
// with microseconds, and does not mysteriously eat the string. It can be
// safely called from goroutines but won't immediately halt the test. The
// failedCh will be closed to allow blocking functions in the main thread to
// detect the failure and react. Note that you should arrange for the main
// thread to block until all goroutines have completed in order to reliably
// fail tests using this function.
func (c *cluster) Failf(format string, args ...interface{}) {
	c.logger.Printf(format, args...)
	c.t.Fail()
	c.notifyFailed()
}

// FailNowf provides a logging function that fails the tests, prints the output
// with microseconds, and does not mysteriously eat the string. FailNowf must be
// called from the goroutine running the test or benchmark function, not from
// other goroutines created during the test. Calling FailNowf does not stop
// those other goroutines.
func (c *cluster) FailNowf(format string, args ...interface{}) {
	c.logger.Printf(format, args...)
	c.t.FailNow()
}
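
// applyInBackground is a hypothetical sketch (not used by the package's
// tests) of how Failf is meant to be used from a helper goroutine: it marks
// the test failed and closes failedCh so blocking helpers such as WaitEvent
// and GetInState notice, while FailNowf must stay on the goroutine running
// the test. The caller is expected to block on the WaitGroup before the test
// returns.
func applyInBackground(c *cluster, leader *Raft, data []byte, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := leader.Apply(data, 0).Error(); err != nil {
			// Safe from a non-test goroutine; the failure is surfaced to
			// the main goroutine via failedCh.
			c.Failf("[ERR] apply failed: %v", err)
		}
	}()
}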
// Close shuts down the cluster and cleans up.
func (c *cluster) Close() {
	var futures []Future
	for _, r := range c.rafts {
		futures = append(futures, r.Shutdown())
	}

	// Wait for shutdown
	limit := time.AfterFunc(c.longstopTimeout, func() {
		// We can't FailNowf here, and c.Failf won't do anything if we
		// hang, so panic.
		panic("timed out waiting for shutdown")
	})
	defer limit.Stop()

	for _, f := range futures {
		if err := f.Error(); err != nil {
			c.FailNowf("[ERR] shutdown future err: %v", err)
		}
	}

	for _, d := range c.dirs {
		os.RemoveAll(d)
	}
}

// WaitEventChan returns a channel which will signal if an observation is made
// or a timeout occurs. It is possible to set a filter to look for specific
// observations. Setting timeout to 0 means that it will wait forever until a
// non-filtered observation is made.
func (c *cluster) WaitEventChan(filter FilterFn, timeout time.Duration) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		var timeoutCh <-chan time.Time
		if timeout > 0 {
			timeoutCh = time.After(timeout)
		}
		for {
			select {
			case <-timeoutCh:
				return

			case o, ok := <-c.observationCh:
				if !ok || filter == nil || filter(&o) {
					return
				}
			}
		}
	}()
	return ch
}

// WaitEvent waits until an observation is made, a timeout occurs, or a test
// failure is signaled. It is possible to set a filter to look for specific
// observations. Setting timeout to 0 means that it will wait forever until a
// non-filtered observation is made or a test failure is signaled.
func (c *cluster) WaitEvent(filter FilterFn, timeout time.Duration) {
	select {
	case <-c.failedCh:
		c.t.FailNow()

	case <-c.WaitEventChan(filter, timeout):
	}
}

// WaitForReplication blocks until every FSM in the cluster has the given
// length, or the long sanity check timeout expires.
func (c *cluster) WaitForReplication(fsmLength int) {
	limitCh := time.After(c.longstopTimeout)

CHECK:
	for {
		ch := c.WaitEventChan(nil, c.conf.CommitTimeout)
		select {
		case <-c.failedCh:
			c.t.FailNow()

		case <-limitCh:
			c.FailNowf("[ERR] Timeout waiting for replication")

		case <-ch:
			for _, fsmRaw := range c.fsms {
				fsm := getMockFSM(fsmRaw)
				fsm.Lock()
				num := len(fsm.logs)
				fsm.Unlock()
				if num != fsmLength {
					continue CHECK
				}
			}
			return
		}
	}
}

// pollState takes a snapshot of the state of the cluster. This might not be
// stable, so use GetInState() to apply some additional checks when waiting
// for the cluster to achieve a particular state.
func (c *cluster) pollState(s RaftState) ([]*Raft, uint64) {
	var highestTerm uint64
	in := make([]*Raft, 0, 1)
	for _, r := range c.rafts {
		if r.State() == s {
			in = append(in, r)
		}
		term := r.getCurrentTerm()
		if term > highestTerm {
			highestTerm = term
		}
	}
	return in, highestTerm
}

// GetInState polls the state of the cluster and attempts to identify when it has
// settled into the given state.
func (c *cluster) GetInState(s RaftState) []*Raft {
	c.logger.Printf("[INFO] Starting stability test for raft state: %+v", s)
	limitCh := time.After(c.longstopTimeout)

	// An election should complete after 2 * max(HeartbeatTimeout, ElectionTimeout)
	// because of the randomised timer expiring in 1 x interval ... 2 x interval.
	// We add a bit for propagation delay. If the election fails (e.g. because
	// two elections start at once), we will have got something through our
	// observer channel indicating a different state (i.e. one of the nodes
	// will have moved to candidate state) which will reset the timer.
	//
	// Because of an implementation peculiarity, it can actually be 3 x timeout.
	timeout := c.conf.HeartbeatTimeout
	if timeout < c.conf.ElectionTimeout {
		timeout = c.conf.ElectionTimeout
	}
	timeout = 2*timeout + c.conf.CommitTimeout
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	// Wait until we have a stable instate slice. Each time we see an
	// observation that a state has changed, recheck it and if it has changed,
	// restart the timer.
	var pollStartTime = time.Now()
	for {
		inState, highestTerm := c.pollState(s)
		inStateTime := time.Now()

		// Sometimes this routine is called very early on before the
		// rafts have started up. We then timeout even though no one has
		// even started an election. So if the highest term in use is
		// zero, we know there are no raft processes that have yet issued
		// a RequestVote, and we set a long time out. This is fixed when
		// we hear the first RequestVote, at which point we reset the
		// timer.
		if highestTerm == 0 {
			timer.Reset(c.longstopTimeout)
		} else {
			timer.Reset(timeout)
		}

		// Filter will wake up whenever we observe a RequestVote or a
		// state change.
		filter := func(ob *Observation) bool {
			switch ob.Data.(type) {
			case RaftState:
				return true
			case RequestVoteRequest:
				return true
			default:
				return false
			}
		}

		select {
		case <-c.failedCh:
			c.t.FailNow()

		case <-limitCh:
			c.FailNowf("[ERR] Timeout waiting for stable %s state", s)

		case <-c.WaitEventChan(filter, 0):
			c.logger.Printf("[DEBUG] Resetting stability timeout")

		case t, ok := <-timer.C:
			if !ok {
				c.FailNowf("[ERR] Timer channel errored")
			}
			c.logger.Printf("[INFO] Stable state for %s reached at %s (%d nodes), %s from start of poll, %s from cluster start. Timeout at %s, %s after stability",
				s, inStateTime, len(inState), inStateTime.Sub(pollStartTime), inStateTime.Sub(c.startTime), t, t.Sub(inStateTime))
			return inState
		}
	}
}

// Leader waits for the cluster to elect a leader and stay in a stable state.
func (c *cluster) Leader() *Raft {
	leaders := c.GetInState(Leader)
	if len(leaders) != 1 {
		c.FailNowf("[ERR] expected one leader: %v", leaders)
	}
	return leaders[0]
}

// Followers waits for the cluster to have N-1 followers and stay in a stable
// state.
func (c *cluster) Followers() []*Raft {
	expFollowers := len(c.rafts) - 1
	followers := c.GetInState(Follower)
	if len(followers) != expFollowers {
		c.FailNowf("[ERR] timeout waiting for %d followers (followers are %v)", expFollowers, followers)
	}
	return followers
}

// FullyConnect connects all the transports together.
func (c *cluster) FullyConnect() {
	c.logger.Printf("[DEBUG] Fully Connecting")
	for i, t1 := range c.trans {
		for j, t2 := range c.trans {
			if i != j {
				t1.Connect(t2.LocalAddr(), t2)
				t2.Connect(t1.LocalAddr(), t1)
			}
		}
	}
}

// Disconnect disconnects all transports from the given address.
func (c *cluster) Disconnect(a ServerAddress) {
	c.logger.Printf("[DEBUG] Disconnecting %v", a)
	for _, t := range c.trans {
		if t.LocalAddr() == a {
			t.DisconnectAll()
		} else {
			t.Disconnect(a)
		}
	}
}

// Partition keeps the given list of addresses connected but isolates them
// from the other members of the cluster.
func (c *cluster) Partition(far []ServerAddress) {
	c.logger.Printf("[DEBUG] Partitioning %v", far)

	// Gather the set of nodes on the "near" side of the partition (we
	// will call the supplied list of nodes the "far" side).
	near := make(map[ServerAddress]struct{})
OUTER:
	for _, t := range c.trans {
		l := t.LocalAddr()
		for _, a := range far {
			if l == a {
				continue OUTER
			}
		}
		near[l] = struct{}{}
	}

	// Now fixup all the connections. The near side will be separated from
	// the far side, and vice-versa.
	for _, t := range c.trans {
		l := t.LocalAddr()
		if _, ok := near[l]; ok {
			for _, a := range far {
				t.Disconnect(a)
			}
		} else {
			for a := range near {
				t.Disconnect(a)
			}
		}
	}
}

// IndexOf returns the index of the given raft instance.
func (c *cluster) IndexOf(r *Raft) int {
	for i, n := range c.rafts {
		if n == r {
			return i
		}
	}
	return -1
}

// EnsureLeader checks that ALL the nodes think the leader is the given expected
// leader.
func (c *cluster) EnsureLeader(t *testing.T, expect ServerAddress) {
	// We assume c.Leader() has been called already; now check all the rafts
	// think the leader is correct.
	fail := false
	for _, r := range c.rafts {
		leader := ServerAddress(r.Leader())
		if leader != expect {
			if leader == "" {
				leader = "[none]"
			}
			if expect == "" {
				c.logger.Printf("[ERR] Peer %s sees leader %v expected [none]", r, leader)
			} else {
				c.logger.Printf("[ERR] Peer %s sees leader %v expected %v", r, leader, expect)
			}
			fail = true
		}
	}
	if fail {
		c.FailNowf("[ERR] At least one peer has the wrong notion of leader")
	}
}
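
// partitionLeaderExample is a hypothetical sketch (not part of the package's
// test suite) of how Partition, FullyConnect and GetInState are intended to
// be combined: isolate the current leader, wait for the connected majority to
// elect a replacement, then heal the partition. It assumes a three-node
// cluster created with MakeCluster.
func partitionLeaderExample(t *testing.T) {
	c := MakeCluster(3, t, nil)
	defer c.Close()

	// Isolate the current leader on the "far" side of a partition.
	leader := c.Leader()
	leaderAddr := c.trans[c.IndexOf(leader)].LocalAddr()
	c.Partition([]ServerAddress{leaderAddr})

	// The old leader may keep believing it is leader until its lease
	// expires, so poll with GetInState rather than calling c.Leader(),
	// which insists on exactly one node in the Leader state.
	limit := time.Now().Add(c.longstopTimeout)
	for time.Now().Before(limit) {
		c.WaitEvent(nil, c.conf.CommitTimeout)
		if leaders := c.GetInState(Leader); len(leaders) == 1 && leaders[0] != leader {
			break
		}
	}

	// Heal the partition; the deposed leader should rejoin as a follower.
	c.FullyConnect()
	c.Followers()
}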
// EnsureSame makes sure all the FSMs have the same contents.
func (c *cluster) EnsureSame(t *testing.T) {
	limit := time.Now().Add(c.longstopTimeout)
	first := getMockFSM(c.fsms[0])

CHECK:
	first.Lock()
	for i, fsmRaw := range c.fsms {
		fsm := getMockFSM(fsmRaw)
		if i == 0 {
			continue
		}
		fsm.Lock()

		if len(first.logs) != len(fsm.logs) {
			fsm.Unlock()
			if time.Now().After(limit) {
				c.FailNowf("[ERR] FSM log length mismatch: %d %d",
					len(first.logs), len(fsm.logs))
			} else {
				goto WAIT
			}
		}

		for idx := 0; idx < len(first.logs); idx++ {
			if !bytes.Equal(first.logs[idx], fsm.logs[idx]) {
				fsm.Unlock()
				if time.Now().After(limit) {
					c.FailNowf("[ERR] FSM log mismatch at index %d", idx)
				} else {
					goto WAIT
				}
			}
		}
		if len(first.configurations) != len(fsm.configurations) {
			fsm.Unlock()
			if time.Now().After(limit) {
				c.FailNowf("[ERR] FSM configuration length mismatch: %d %d",
					len(first.configurations), len(fsm.configurations))
			} else {
				goto WAIT
			}
		}

		for idx := 0; idx < len(first.configurations); idx++ {
			if !reflect.DeepEqual(first.configurations[idx], fsm.configurations[idx]) {
				fsm.Unlock()
				if time.Now().After(limit) {
					c.FailNowf("[ERR] FSM configuration mismatch at index %d: %v, %v", idx, first.configurations[idx], fsm.configurations[idx])
				} else {
					goto WAIT
				}
			}
		}
		fsm.Unlock()
	}

	first.Unlock()
	return

WAIT:
	first.Unlock()
	c.WaitEvent(nil, c.conf.CommitTimeout)
	goto CHECK
}

// getConfiguration returns the configuration of the given Raft instance, or
// fails the test if there's an error.
func (c *cluster) getConfiguration(r *Raft) Configuration {
	future := r.GetConfiguration()
	if err := future.Error(); err != nil {
		c.FailNowf("[ERR] failed to get configuration: %v", err)
		return Configuration{}
	}

	return future.Configuration()
}

// EnsureSamePeers makes sure all the rafts have the same set of peers.
func (c *cluster) EnsureSamePeers(t *testing.T) {
	limit := time.Now().Add(c.longstopTimeout)
	peerSet := c.getConfiguration(c.rafts[0])

CHECK:
	for i, raft := range c.rafts {
		if i == 0 {
			continue
		}

		otherSet := c.getConfiguration(raft)
		if !reflect.DeepEqual(peerSet, otherSet) {
			if time.Now().After(limit) {
				c.FailNowf("[ERR] peer mismatch: %+v %+v", peerSet, otherSet)
			} else {
				goto WAIT
			}
		}
	}
	return

WAIT:
	c.WaitEvent(nil, c.conf.CommitTimeout)
	goto CHECK
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MakeClusterOpts struct {
	Peers           int
	Bootstrap       bool
	Conf            *Config
	ConfigStoreFSM  bool
	MakeFSMFunc     func() FSM
	LongstopTimeout time.Duration
}

// makeCluster will return a cluster with the given config and number of peers.
// If bootstrap is true, the servers will know about each other before starting,
// otherwise their transports will be wired up but they won't yet have configured
// each other.
func makeCluster(t *testing.T, opts *MakeClusterOpts) *cluster {
	if opts.Conf == nil {
		opts.Conf = inmemConfig(t)
	}

	c := &cluster{
		observationCh: make(chan Observation, 1024),
		conf:          opts.Conf,
		// Propagation takes a maximum of 2 heartbeat timeouts (time to
		// get a new heartbeat that would cause a commit) plus a bit.
		propagateTimeout: opts.Conf.HeartbeatTimeout*2 + opts.Conf.CommitTimeout,
		longstopTimeout:  5 * time.Second,
		logger:           newTestLoggerWithPrefix(t, "cluster"),
		failedCh:         make(chan struct{}),
	}
	if opts.LongstopTimeout > 0 {
		c.longstopTimeout = opts.LongstopTimeout
	}

	c.t = t
	var configuration Configuration

	// Setup the stores and transports
	for i := 0; i < opts.Peers; i++ {
		dir, err := ioutil.TempDir("", "raft")
		if err != nil {
			c.FailNowf("[ERR] err: %v ", err)
		}

		store := NewInmemStore()
		c.dirs = append(c.dirs, dir)
		c.stores = append(c.stores, store)
		if opts.ConfigStoreFSM {
			c.fsms = append(c.fsms, &MockFSMConfigStore{
				FSM: &MockFSM{},
			})
		} else {
			var fsm FSM
			if opts.MakeFSMFunc != nil {
				fsm = opts.MakeFSMFunc()
			} else {
				fsm = &MockFSM{}
			}
			c.fsms = append(c.fsms, fsm)
		}

		dir2, snap := FileSnapTest(t)
		c.dirs = append(c.dirs, dir2)
		c.snaps = append(c.snaps, snap)

		addr, trans := NewInmemTransport("")
		c.trans = append(c.trans, trans)
		localID := ServerID(fmt.Sprintf("server-%s", addr))
		if opts.Conf.ProtocolVersion < 3 {
			localID = ServerID(addr)
		}
		configuration.Servers = append(configuration.Servers, Server{
			Suffrage: Voter,
			ID:       localID,
			Address:  addr,
		})
	}

	// Wire the transports together
	c.FullyConnect()

	// Create all the rafts
	c.startTime = time.Now()
	for i := 0; i < opts.Peers; i++ {
		logs := c.stores[i]
		store := c.stores[i]
		snap := c.snaps[i]
		trans := c.trans[i]

		peerConf := opts.Conf
		peerConf.LocalID = configuration.Servers[i].ID
		peerConf.Logger = newTestLeveledLoggerWithPrefix(t, string(configuration.Servers[i].ID))

		if opts.Bootstrap {
			err := BootstrapCluster(peerConf, logs, store, snap, trans, configuration)
			if err != nil {
				c.FailNowf("[ERR] BootstrapCluster failed: %v", err)
			}
		}

		raft, err := NewRaft(peerConf, c.fsms[i], logs, store, snap, trans)
		if err != nil {
			c.FailNowf("[ERR] NewRaft failed: %v", err)
		}

		raft.RegisterObserver(NewObserver(c.observationCh, false, nil))
		c.rafts = append(c.rafts, raft)
	}

	return c
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeCluster(n int, t *testing.T, conf *Config) *cluster {
	return makeCluster(t, &MakeClusterOpts{
		Peers:     n,
		Bootstrap: true,
		Conf:      conf,
	})
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeClusterNoBootstrap(n int, t *testing.T, conf *Config) *cluster {
	return makeCluster(t, &MakeClusterOpts{
		Peers: n,
		Conf:  conf,
	})
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeClusterCustom(t *testing.T, opts *MakeClusterOpts) *cluster {
	return makeCluster(t, opts)
}

// NOTE: This is exposed for middleware testing purposes and is not a stable API
func FileSnapTest(t *testing.T) (string, *FileSnapshotStore) {
	// Create a test dir
	dir, err := ioutil.TempDir("", "raft")
	if err != nil {
		t.Fatalf("err: %v ", err)
	}

	snap, err := NewFileSnapshotStoreWithLogger(dir, 3, newTestLogger(t))
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir, snap
}
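
// exampleClusterUsage is a hypothetical end-to-end sketch (not part of the
// package's test suite) of how these helpers are typically combined: build a
// bootstrapped three-node cluster, apply entries through the leader, wait for
// replication, and check that every FSM agrees before shutting down.
func exampleClusterUsage(t *testing.T) {
	c := MakeCluster(3, t, nil)
	defer c.Close()

	// Wait for a stable leader, then commit a handful of entries.
	leader := c.Leader()
	for i := 0; i < 10; i++ {
		future := leader.Apply([]byte(fmt.Sprintf("op-%d", i)), 0)
		if err := future.Error(); err != nil {
			c.FailNowf("[ERR] apply failed: %v", err)
		}
	}

	// Block until every MockFSM has all ten entries, then verify the logs
	// and configurations match and that all peers agree on the leader.
	c.WaitForReplication(10)
	c.EnsureSame(t)
	c.EnsureLeader(t, c.trans[c.IndexOf(leader)].LocalAddr())
}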