1//go:build testtools
2// +build testtools
3
4package main
5
6import (
7	"bufio"
8	"bytes"
9	"compress/gzip"
10	"crypto/rand"
11	"crypto/rsa"
12	"crypto/sha256"
13	"crypto/tls"
14	"crypto/x509"
15	"crypto/x509/pkix"
16	"encoding/base64"
17	"encoding/hex"
18	"encoding/json"
19	"encoding/pem"
20	"errors"
21	"fmt"
22	"io"
23	"io/ioutil"
24	"log"
25	"math"
26	"math/big"
27	"net/http"
28	"net/http/httptest"
29	"net/textproto"
30	"os"
31	"os/exec"
32	"regexp"
33	"sort"
34	"strconv"
35	"strings"
36	"sync"
37	"time"
38)
39
var (
	// repoDir is the root directory of the bare test repositories,
	// populated from LFSTEST_DIR in main.
	repoDir          string
	largeObjects     = newLfsStorage()
	server           *httptest.Server
	serverTLS        *httptest.Server
	serverClientCert *httptest.Server

	// maps OIDs to content strings. Both the LFS and Storage test servers below
	// see OIDs.
	oidHandlers map[string]string

	// These magic strings tell the test lfs server change their behavior so the
	// integration tests can check those use cases. Tests will create objects with
	// the magic strings as the contents.
	//
	//   printf "status:lfs:404" > 404.dat
	//
	// NOTE(review): the printf example above does not match the current
	// magic-string spellings (e.g. "status-batch-404"); confirm against the
	// test suite.
	contentHandlers = []string{
		"status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500",
		"status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503",
		"status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size",
		"object-authenticated", "storage-download-retry", "storage-upload-retry", "storage-upload-retry-later", "unknown-oid",
		"send-verify-action", "send-deprecated-links", "redirect-storage-upload", "storage-compress", "batch-hash-algo-empty", "batch-hash-algo-invalid",
	}

	// reqCookieReposRE matches repository paths that require the client to
	// present a cookie (enforced via skipIfNoCookie in the root handler).
	reqCookieReposRE = regexp.MustCompile(`\A/require-cookie-`)
)
67
// main starts three httptest servers sharing a single mux — plain HTTP, TLS,
// and TLS requiring client certificates — writes the state files the shell
// test suite reads (server URLs, certificates, keys), and blocks until a
// request to /shutdown arrives.
func main() {
	repoDir = os.Getenv("LFSTEST_DIR")

	mux := http.NewServeMux()
	server = httptest.NewServer(mux)
	serverTLS = httptest.NewTLSServer(mux)
	serverClientCert = httptest.NewUnstartedServer(mux)

	//setup Client Cert server
	rootKey, rootCert := generateCARootCertificates()
	_, clientCertPEM, clientKeyPEM, clientKeyEncPEM := generateClientCertificates(rootCert, rootKey)

	certPool := x509.NewCertPool()
	certPool.AddCert(rootCert)

	// Reuse the TLS server's certificate but require (and verify) a client
	// certificate signed by the test CA. TLS must be configured before
	// StartTLS is called on the unstarted server.
	serverClientCert.TLS = &tls.Config{
		Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]},
		ClientAuth:   tls.RequireAndVerifyClientCert,
		ClientCAs:    certPool,
	}
	serverClientCert.StartTLS()

	// stopch is signaled by the /shutdown endpoint to end the process.
	stopch := make(chan bool)

	mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
		stopch <- true
	})

	mux.HandleFunc("/storage/", storageHandler)
	mux.HandleFunc("/verify", verifyHandler)
	mux.HandleFunc("/redirect307/", redirect307Handler)
	mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "%s\n", time.Now().String())
	})
	// Catch-all: paths containing /info/lfs go to the LFS API handler
	// (after auth checks); everything else is proxied to git http-backend.
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		id, ok := reqId(w)
		if !ok {
			return
		}

		if reqCookieReposRE.MatchString(r.URL.Path) {
			if skipIfNoCookie(w, r, id) {
				return
			}
		}

		if strings.Contains(r.URL.Path, "/info/lfs") {
			if !skipIfBadAuth(w, r, id) {
				lfsHandler(w, r, id)
			}

			return
		}

		debug(id, "git http-backend %s %s", r.Method, r.URL)
		gitHandler(w, r)
	})

	// Publish the server URLs and certificate material where the shell
	// tests expect them; each state file is removed again on shutdown.
	urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver")
	defer os.RemoveAll(urlname)

	sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl")
	defer os.RemoveAll(sslurlname)

	clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-ssl")
	defer os.RemoveAll(clientCertUrlname)

	// Export the TLS server's leaf certificate as PEM so clients can trust it.
	block := &pem.Block{}
	block.Type = "CERTIFICATE"
	block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0]
	pembytes := pem.EncodeToMemory(block)

	certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert")
	defer os.RemoveAll(certname)

	cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert")
	defer os.RemoveAll(cccertname)

	ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key")
	defer os.RemoveAll(ckcertname)

	ckecertname := writeTestStateFile(clientKeyEncPEM, "LFSTEST_CLIENT_KEY_ENCRYPTED", "lfstest-gitserver-client-key-enc")
	defer os.RemoveAll(ckecertname)

	debug("init", "server url: %s", server.URL)
	debug("init", "server tls url: %s", serverTLS.URL)
	debug("init", "server client cert url: %s", serverClientCert.URL)

	<-stopch
	debug("init", "git server done")
}
159
160// writeTestStateFile writes contents to either the file referenced by the
161// environment variable envVar, or defaultFilename if that's not set. Returns
162// the filename that was used
163func writeTestStateFile(contents []byte, envVar, defaultFilename string) string {
164	f := os.Getenv(envVar)
165	if len(f) == 0 {
166		f = defaultFilename
167	}
168	file, err := os.Create(f)
169	if err != nil {
170		log.Fatalln(err)
171	}
172	file.Write(contents)
173	file.Close()
174	return f
175}
176
// lfsObject is one object entry in a batch API response: its OID and size,
// plus either per-operation action links or an error.
type lfsObject struct {
	Oid           string              `json:"oid,omitempty"`
	Size          int64               `json:"size,omitempty"`
	Authenticated bool                `json:"authenticated,omitempty"`
	Actions       map[string]*lfsLink `json:"actions,omitempty"`
	// Links is the deprecated "_links" spelling of Actions, emitted only
	// for the "send-deprecated-links" test handler.
	Links map[string]*lfsLink `json:"_links,omitempty"`
	Err   *lfsError           `json:"error,omitempty"`
}

// lfsLink is a single transfer action: where to send the request, which
// headers to attach, and when the action expires (absolute and/or relative).
type lfsLink struct {
	Href      string            `json:"href"`
	Header    map[string]string `json:"header,omitempty"`
	ExpiresAt time.Time         `json:"expires_at,omitempty"`
	ExpiresIn int               `json:"expires_in,omitempty"`
}
192
193type lfsError struct {
194	Code    int    `json:"code,omitempty"`
195	Message string `json:"message"`
196}
197
198func writeLFSError(w http.ResponseWriter, code int, msg string) {
199	by, err := json.Marshal(&lfsError{Message: msg})
200	if err != nil {
201		http.Error(w, "json encoding error: "+err.Error(), 500)
202		return
203	}
204
205	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
206	w.WriteHeader(code)
207	w.Write(by)
208}
209
210// handles any requests with "{name}.server.git/info/lfs" in the path
211func lfsHandler(w http.ResponseWriter, r *http.Request, id string) {
212	repo, err := repoFromLfsUrl(r.URL.Path)
213	if err != nil {
214		w.WriteHeader(500)
215		w.Write([]byte(err.Error()))
216		return
217	}
218
219	// Check that we're sending valid data.
220	if !strings.Contains(r.Header.Get("Accept"), "application/vnd.git-lfs+json") {
221		w.WriteHeader(406)
222		return
223	}
224
225	debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo)
226	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
227	switch r.Method {
228	case "POST":
229		// Reject invalid data.
230		if !strings.Contains(r.Header.Get("Content-Type"), "application/vnd.git-lfs+json") {
231			w.WriteHeader(400)
232			return
233		}
234
235		if strings.HasSuffix(r.URL.String(), "batch") {
236			lfsBatchHandler(w, r, id, repo)
237		} else {
238			locksHandler(w, r, repo)
239		}
240	case "DELETE":
241		lfsDeleteHandler(w, r, id, repo)
242	case "GET":
243		if strings.Contains(r.URL.String(), "/locks") {
244			locksHandler(w, r, repo)
245		} else {
246			w.WriteHeader(404)
247			w.Write([]byte("lock request"))
248		}
249	default:
250		w.WriteHeader(405)
251	}
252}
253
254func lfsUrl(repo, oid string, redirect bool) string {
255	if redirect {
256		return server.URL + "/redirect307/objects/" + oid + "?r=" + repo
257	}
258	return server.URL + "/storage/" + oid + "?r=" + repo
259}
260
const (
	// After secondsToRefillTokens seconds, refillTokenCount more requests
	// become available for a rate-limited key.
	secondsToRefillTokens = 10
	refillTokenCount      = 5
)

var (
	// requestTokens maps a rate-limit key to how many requests it may still
	// make before waiting; retryStartTimes records when each key's waiting
	// period began. Both are guarded by laterRetriesMu.
	requestTokens   = make(map[string]int)
	retryStartTimes = make(map[string]time.Time)
	laterRetriesMu  sync.Mutex
)

// checkRateLimit tracks the various requests to the git-server. If it is the
// first request of its kind, a timer is started; when it finishes, a fixed
// number of requests become available. It returns how many seconds the caller
// must still wait, and whether waiting is required at all.
//
// NOTE(review): the api parameter is currently unused; it is kept so caller
// signatures stay stable.
func checkRateLimit(api, direction, repo, oid string) (seconds int, isWait bool) {
	laterRetriesMu.Lock()
	defer laterRetriesMu.Unlock()

	key := strings.Join([]string{direction, repo, oid}, ":")
	if requestsRemaining, ok := requestTokens[key]; !ok || requestsRemaining == 0 {
		if retryStartTimes[key].IsZero() {
			// First request for this key: start the refill window now.
			// (IsZero replaces the original comparison against time.Time{}.)
			retryStartTimes[key] = time.Now()
		}

		// No tokens left: the caller must wait out the remainder of the
		// refill window.
		secsPassed := time.Since(retryStartTimes[key]).Seconds()
		if secsPassed >= float64(secondsToRefillTokens) {
			// The window has elapsed; grant a fresh batch of tokens.
			requestTokens[key] = refillTokenCount
			return 0, false
		}
		return secondsToRefillTokens - int(secsPassed) + 1, true
	}

	requestTokens[key]--

	// Tokens are now exhausted; record when the waiting period started.
	if requestTokens[key] == 0 {
		retryStartTimes[key] = time.Now()
	}

	return 0, false
}
304
305var (
306	retries   = make(map[string]uint32)
307	retriesMu sync.Mutex
308)
309
310func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) {
311	// fmtStr formats a string like "<api>-<direction>-[check]-<retry>",
312	// i.e., "legacy-upload-check-retry", or "storage-download-retry".
313	var fmtStr string
314	if check {
315		fmtStr = "%s-%s-check-retry"
316	} else {
317		fmtStr = "%s-%s-retry"
318	}
319
320	if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) {
321		return 0, false
322	}
323
324	retriesMu.Lock()
325	defer retriesMu.Unlock()
326
327	retryKey := strings.Join([]string{direction, repo, oid}, ":")
328
329	retries[retryKey]++
330	retries := retries[retryKey]
331
332	return retries, true
333}
334
335func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
336	parts := strings.Split(r.URL.Path, "/")
337	oid := parts[len(parts)-1]
338
339	largeObjects.Delete(repo, oid)
340	debug(id, "DELETE:", oid)
341	w.WriteHeader(200)
342}
343
// batchReq is the decoded body of a batch API request: the operation
// ("download" or "upload"), the objects it applies to, the transfer adapters
// the client supports, and optionally the ref being pushed from.
type batchReq struct {
	Transfers []string    `json:"transfers"`
	Operation string      `json:"operation"`
	Objects   []lfsObject `json:"objects"`
	Ref       *Ref        `json:"ref,omitempty"`
}

// RefName returns the name of the request's ref, or "" when no ref was sent.
func (r *batchReq) RefName() string {
	if r.Ref == nil {
		return ""
	}
	return r.Ref.Name
}

// batchResp is the body of a batch API response: the chosen transfer
// adapter (if any), one entry per requested object, and the hash algorithm.
type batchResp struct {
	Transfer      string      `json:"transfer,omitempty"`
	Objects       []lfsObject `json:"objects"`
	HashAlgorithm string      `json:"hash_algo,omitempty"`
}
363
// lfsBatchHandler implements the git-lfs batch API: it answers POST
// .../objects/batch requests with a download/upload action (or per-object
// error) for each requested object. Magic repository names and magic object
// contents (see contentHandlers) select special behaviors used by the
// integration tests.
func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
	// X-Check-Object requests bypass the repo-level failure modes below.
	checkingObject := r.Header.Get("X-Check-Object") == "1"
	if !checkingObject && repo == "batchunsupported" {
		w.WriteHeader(404)
		return
	}

	if !checkingObject && repo == "badbatch" {
		w.WriteHeader(203)
		return
	}

	// The netrctest repo only accepts the fixed netrc test credentials.
	if repo == "netrctest" {
		user, pass, err := extractAuth(r.Header.Get("Authorization"))
		if err != nil || (user != "netrcuser" || pass != "netrcpass") {
			w.WriteHeader(403)
			return
		}
	}

	if missingRequiredCreds(w, r, repo) {
		return
	}

	// Tee the request body into buf so it can be logged verbatim while
	// being decoded; then drain and close the body.
	buf := &bytes.Buffer{}
	tee := io.TeeReader(r.Body, buf)
	objs := &batchReq{}
	err := json.NewDecoder(tee).Decode(objs)
	io.Copy(ioutil.Discard, r.Body)
	r.Body.Close()

	debug(id, "REQUEST")
	debug(id, buf.String())

	if err != nil {
		log.Fatal(err)
	}

	// Repos named "...-<branch>-branch-required" reject batches whose ref
	// is not refs/heads/<branch>.
	if strings.HasSuffix(repo, "branch-required") {
		parts := strings.Split(repo, "-")
		lenParts := len(parts)
		if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != objs.RefName() {
			w.WriteHeader(403)
			json.NewEncoder(w).Encode(struct {
				Message string `json:"message"`
			}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], objs.RefName())})
			return
		}
	}

	// Repos named "...batch-retry-later" simulate a 429 with Retry-After.
	if strings.HasSuffix(repo, "batch-retry-later") {
		if timeLeft, isWaiting := checkRateLimit("batch", "", repo, ""); isWaiting {
			w.Header().Set("Retry-After", strconv.Itoa(timeLeft))
			w.WriteHeader(http.StatusTooManyRequests)

			w.Write([]byte("rate limit reached"))
			fmt.Println("Setting header to: ", strconv.Itoa(timeLeft))
			return
		}
	}

	res := []lfsObject{}
	testingChunked := testingChunkedTransferEncoding(r)
	testingTus := testingTusUploadInBatchReq(r)
	testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r)
	// The local shadows the package-level testingCustomTransfer function;
	// the right-hand side still refers to the function.
	testingCustomTransfer := testingCustomTransfer(r)
	var transferChoice string
	var searchForTransfer string
	hashAlgo := "sha256"
	if testingTus {
		searchForTransfer = "tus"
	} else if testingCustomTransfer {
		searchForTransfer = "testcustom"
	}
	// Only select the special transfer adapter if the client offered it.
	if len(searchForTransfer) > 0 {
		for _, t := range objs.Transfers {
			if t == searchForTransfer {
				transferChoice = searchForTransfer
				break
			}

		}
	}
	for _, obj := range objs.Objects {
		// handler is the magic content string registered for this OID
		// (empty for ordinary objects).
		handler := oidHandlers[obj.Oid]
		action := objs.Operation

		o := lfsObject{
			Size:    obj.Size,
			Actions: make(map[string]*lfsLink),
		}

		// Clobber the OID if told to do so.
		if handler == "unknown-oid" {
			o.Oid = "unknown-oid"
		} else {
			o.Oid = obj.Oid
		}

		// Downloads of missing objects get a 404 entry; uploads of objects
		// we already have simply omit the action.
		exists := largeObjects.Has(repo, obj.Oid)
		addAction := true
		if action == "download" {
			if !exists {
				o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)}
				addAction = false
			}
		} else {
			if exists {
				// not an error but don't add an action
				addAction = false
			}
		}

		if handler == "object-authenticated" {
			o.Authenticated = true
		}

		switch handler {
		case "status-batch-403":
			o.Err = &lfsError{Code: 403, Message: "welp"}
		case "status-batch-404":
			o.Err = &lfsError{Code: 404, Message: "welp"}
		case "status-batch-410":
			o.Err = &lfsError{Code: 410, Message: "welp"}
		case "status-batch-422":
			o.Err = &lfsError{Code: 422, Message: "welp"}
		case "status-batch-500":
			o.Err = &lfsError{Code: 500, Message: "welp"}
		default: // regular 200 response
			if handler == "return-invalid-size" {
				o.Size = -1
			}

			if handler == "batch-hash-algo-empty" {
				hashAlgo = ""
			} else if handler == "batch-hash-algo-invalid" {
				hashAlgo = "invalid"
			}

			if handler == "send-deprecated-links" {
				o.Links = make(map[string]*lfsLink)
			}
			if addAction {
				a := &lfsLink{
					Href:   lfsUrl(repo, obj.Oid, handler == "redirect-storage-upload"),
					Header: map[string]string{},
				}
				// Stamp the link with expiry times for expiry test repos.
				a = serveExpired(a, repo, handler)

				if handler == "send-deprecated-links" {
					o.Links[action] = a
				} else {
					o.Actions[action] = a
				}
			}

			if handler == "send-verify-action" {
				o.Actions["verify"] = &lfsLink{
					Href: server.URL + "/verify",
					Header: map[string]string{
						"repo": repo,
					},
				}
			}
		}

		// Extra headers used by the chunked-encoding and tus-interrupt
		// integration tests; they go wherever the action link was stored.
		if testingChunked && addAction {
			if handler == "send-deprecated-links" {
				o.Links[action].Header["Transfer-Encoding"] = "chunked"
			} else {
				o.Actions[action].Header["Transfer-Encoding"] = "chunked"
			}
		}
		if testingTusInterrupt && addAction {
			if handler == "send-deprecated-links" {
				o.Links[action].Header["Lfs-Tus-Interrupt"] = "true"
			} else {
				o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true"
			}
		}

		res = append(res, o)
	}

	ores := batchResp{HashAlgorithm: hashAlgo, Transfer: transferChoice, Objects: res}

	by, err := json.Marshal(ores)
	if err != nil {
		log.Fatal(err)
	}

	debug(id, "RESPONSE: 200")
	debug(id, string(by))

	w.WriteHeader(200)
	w.Write(by)
}
561
// emu guards expiredRepos.
var emu sync.Mutex

// expiredRepos is keyed by repository name; a true value records that the
// repository has already served an expired object (see serveExpired).
var expiredRepos = map[string]bool{}
568
569// serveExpired marks the given repo as having served an expired object, making
570// it unable for that same repository to return an expired object in the future,
571func serveExpired(a *lfsLink, repo, handler string) *lfsLink {
572	var (
573		dur = -5 * time.Minute
574		at  = time.Now().Add(dur)
575	)
576
577	if handler == "return-expired-action-forever" ||
578		(handler == "return-expired-action" && canServeExpired(repo)) {
579
580		emu.Lock()
581		expiredRepos[repo] = true
582		emu.Unlock()
583
584		a.ExpiresAt = at
585		return a
586	}
587
588	switch repo {
589	case "expired-absolute":
590		a.ExpiresAt = at
591	case "expired-relative":
592		a.ExpiresIn = -5
593	case "expired-both":
594		a.ExpiresAt = at
595		a.ExpiresIn = -5
596	}
597
598	return a
599}
600
601// canServeExpired returns whether or not a repository is capable of serving an
602// expired object. In other words, canServeExpired returns whether or not the
603// given repo has yet served an expired object.
604func canServeExpired(repo string) bool {
605	emu.Lock()
606	defer emu.Unlock()
607
608	return !expiredRepos[repo]
609}
610
// Persistent state across requests
var batchResumeFailFallbackStorageAttempts = 0
var tusStorageAttempts = 0

var (
	// vmu guards verifyCounts.
	vmu           sync.Mutex
	// verifyCounts tracks, per repo:oid key, how many verify requests have
	// been seen (used by verifyHandler to fail the first N attempts).
	verifyCounts  = make(map[string]int)
	// verifyRetryRe extracts the failure count from repository names such
	// as "verify-fail-3-times".
	verifyRetryRe = regexp.MustCompile(`verify-fail-(\d+)-times?$`)
)
620
621func verifyHandler(w http.ResponseWriter, r *http.Request) {
622	repo := r.Header.Get("repo")
623	var payload struct {
624		Oid  string `json:"oid"`
625		Size int64  `json:"size"`
626	}
627
628	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
629		writeLFSError(w, http.StatusUnprocessableEntity, err.Error())
630		return
631	}
632
633	var max int
634	if matches := verifyRetryRe.FindStringSubmatch(repo); len(matches) < 2 {
635		return
636	} else {
637		max, _ = strconv.Atoi(matches[1])
638	}
639
640	key := strings.Join([]string{repo, payload.Oid}, ":")
641
642	vmu.Lock()
643	verifyCounts[key] = verifyCounts[key] + 1
644	count := verifyCounts[key]
645	vmu.Unlock()
646
647	if count < max {
648		writeLFSError(w, http.StatusServiceUnavailable, fmt.Sprintf(
649			"intentionally failing verify request %d (out of %d)", count, max,
650		))
651		return
652	}
653}
654
655// handles any /storage/{oid} requests
// storageHandler services /storage/{oid} requests: PUT uploads, GET
// downloads, and the tus.io HEAD/PATCH resumable-upload protocol. An
// object's registered content string (oidHandlers) or stored content selects
// the special failure/retry/compression behaviors used by the integration
// tests.
func storageHandler(w http.ResponseWriter, r *http.Request) {
	id, ok := reqId(w)
	if !ok {
		return
	}
	// The repository travels in the ?r= query parameter; the OID is the
	// final path segment.
	repo := r.URL.Query().Get("r")
	parts := strings.Split(r.URL.Path, "/")
	oid := parts[len(parts)-1]
	if missingRequiredCreds(w, r, repo) {
		return
	}

	debug(id, "storage %s %s repo: %s", r.Method, oid, repo)
	switch r.Method {
	case "PUT":
		// Magic OIDs first: fixed status codes, auth checks, and
		// retry/rate-limit simulations.
		switch oidHandlers[oid] {
		case "status-storage-403":
			w.WriteHeader(403)
			return
		case "status-storage-404":
			w.WriteHeader(404)
			return
		case "status-storage-410":
			w.WriteHeader(410)
			return
		case "status-storage-422":
			w.WriteHeader(422)
			return
		case "status-storage-500":
			w.WriteHeader(500)
			return
		case "status-storage-503":
			writeLFSError(w, 503, "LFS is temporarily unavailable")
			return
		case "object-authenticated":
			// An "authenticated" object must be uploaded without an
			// Authorization header.
			if len(r.Header.Get("Authorization")) > 0 {
				w.WriteHeader(400)
				w.Write([]byte("Should not send authentication"))
			}
			return
		case "storage-upload-retry":
			// Fail the first two attempts to force the client to retry.
			if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 {
				w.WriteHeader(500)
				w.Write([]byte("malformed content"))

				return
			}
		case "storage-upload-retry-later":
			// Simulate a 429 with a Retry-After header.
			if timeLeft, isWaiting := checkRateLimit("storage", "upload", repo, oid); isWaiting {
				w.Header().Set("Retry-After", strconv.Itoa(timeLeft))
				w.WriteHeader(http.StatusTooManyRequests)

				w.Write([]byte("rate limit reached"))
				fmt.Println("Setting header to: ", strconv.Itoa(timeLeft))
				return
			}
		case "storage-compress":
			if r.Header.Get("Accept-Encoding") != "gzip" {
				w.WriteHeader(500)
				w.Write([]byte("not encoded"))
				return
			}
		}

		// When the test requested chunked encoding, verify the request
		// actually used it (logged only, not rejected).
		if testingChunkedTransferEncoding(r) {
			valid := false
			for _, value := range r.TransferEncoding {
				if value == "chunked" {
					valid = true
					break
				}
			}
			if !valid {
				debug(id, "Chunked transfer encoding expected")
			}
		}

		// Hash the uploaded body and require that the URL's trailing OID
		// matches the actual SHA-256 of the content before storing it.
		hash := sha256.New()
		buf := &bytes.Buffer{}

		io.Copy(io.MultiWriter(hash, buf), r.Body)
		oid := hex.EncodeToString(hash.Sum(nil))
		if !strings.HasSuffix(r.URL.Path, "/"+oid) {
			w.WriteHeader(403)
			return
		}

		largeObjects.Set(repo, oid, buf.Bytes())

	case "GET":
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]
		statusCode := 200
		byteLimit := 0
		resumeAt := int64(0)
		compress := false

		// The stored content itself selects the download test behavior.
		if by, ok := largeObjects.Get(repo, oid); ok {
			if len(by) == len("storage-download-retry-later") && string(by) == "storage-download-retry-later" {
				// Simulated 429 rate limiting with Retry-After.
				if secsToWait, wait := checkRateLimit("storage", "download", repo, oid); wait {
					statusCode = http.StatusTooManyRequests
					w.Header().Set("Retry-After", strconv.Itoa(secsToWait))
					by = []byte("rate limit reached")
					fmt.Println("Setting header to: ", strconv.Itoa(secsToWait))
				}
			} else if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" {
				// Fail the first two download attempts to force a retry.
				if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 {
					statusCode = 500
					by = []byte("malformed content")
				}
			} else if len(by) == len("storage-compress") && string(by) == "storage-compress" {
				if r.Header.Get("Accept-Encoding") != "gzip" {
					statusCode = 500
					by = []byte("not encoded")
				} else {
					compress = true
				}
			} else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" {
				// Resume if header includes range, otherwise deliberately interrupt
				if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
					regex := regexp.MustCompile(`bytes=(\d+)\-.*`)
					match := regex.FindStringSubmatch(rangeHdr)
					if match != nil && len(match) > 1 {
						statusCode = 206
						resumeAt, _ = strconv.ParseInt(match[1], 10, 32)
						// NOTE(review): the final Content-Range field
						// (resumeAt-len) is negative, not the usual total
						// length — presumably the client ignores it; confirm.
						w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by))))
					}
				} else {
					byteLimit = 10
				}
			} else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" {
				// Fail any Range: request even though we said we supported it
				// To make sure client can fall back
				if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
					w.WriteHeader(416)
					return
				}
				if batchResumeFailFallbackStorageAttempts == 0 {
					// Truncate output on FIRST attempt to cause resume
					// Second attempt (without range header) is fallback, complete successfully
					byteLimit = 8
					batchResumeFailFallbackStorageAttempts++
				}
			} else if string(by) == "status-batch-retry" {
				if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
					regex := regexp.MustCompile(`bytes=(\d+)\-(.*)`)
					match := regex.FindStringSubmatch(rangeHdr)
					// We have a Range header with two
					// non-empty values.
					if match != nil && len(match) > 2 && len(match[2]) != 0 {
						first, _ := strconv.ParseInt(match[1], 10, 32)
						second, _ := strconv.ParseInt(match[2], 10, 32)
						// The second part of the range
						// is smaller than the first
						// part (or the latter part of
						// the range is non-integral).
						// This is invalid; reject it.
						if second < first {
							w.WriteHeader(400)
							return
						}
						// The range is valid; we'll
						// take the branch below.
					}
					// We got a valid range header, so
					// provide a 206 Partial Content. We
					// ignore the upper bound if one was
					// provided.
					if match != nil && len(match) > 1 {
						statusCode = 206
						resumeAt, _ = strconv.ParseInt(match[1], 10, 32)
						w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by))))
					}
				}
			}
			// Optionally gzip the payload, then send status and (possibly
			// truncated or resumed) content.
			var wrtr io.Writer = w
			if compress {
				w.Header().Set("Content-Encoding", "gzip")
				gz := gzip.NewWriter(w)
				defer gz.Close()

				wrtr = gz
			}
			w.WriteHeader(statusCode)
			if byteLimit > 0 {
				wrtr.Write(by[0:byteLimit])
			} else if resumeAt > 0 {
				wrtr.Write(by[resumeAt:])
			} else {
				wrtr.Write(by)
			}
			return
		}

		w.WriteHeader(404)
	case "HEAD":
		// tus.io
		// Report how many bytes of an incomplete upload we already have.
		if !validateTusHeaders(r, id) {
			w.WriteHeader(400)
			return
		}
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]
		var offset int64
		if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
			offset = int64(len(by))
		}
		w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
		w.WriteHeader(200)
	case "PATCH":
		// tus.io
		// Append the request body to an (possibly partial) upload, verify
		// the final SHA-256 against the OID, and store the object.
		if !validateTusHeaders(r, id) {
			w.WriteHeader(400)
			return
		}
		parts := strings.Split(r.URL.Path, "/")
		oid := parts[len(parts)-1]

		offsetHdr := r.Header.Get("Upload-Offset")
		offset, err := strconv.ParseInt(offsetHdr, 10, 64)
		if err != nil {
			// NOTE(review): log.Fatal exits the process, so the
			// WriteHeader below is unreachable — presumably these were
			// meant to be non-fatal; confirm before changing.
			log.Fatal("Unable to parse Upload-Offset header in request: ", err)
			w.WriteHeader(400)
			return
		}
		hash := sha256.New()
		buf := &bytes.Buffer{}
		out := io.MultiWriter(hash, buf)

		if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
			if offset != int64(len(by)) {
				// NOTE(review): unreachable WriteHeader after log.Fatal,
				// as above.
				log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by)))
				w.WriteHeader(400)
				return
			}
			_, err := out.Write(by)
			if err != nil {
				log.Fatal("Error reading incomplete bytes from store: ", err)
				w.WriteHeader(500)
				return
			}
			largeObjects.DeleteIncomplete(repo, oid)
			debug(id, "Resuming upload of %v at byte %d", oid, offset)
		}

		// As a test, we intentionally break the upload from byte 0 by only
		// reading some bytes the quitting & erroring, this forces a resume
		// any offset > 0 will work ok
		var copyErr error
		if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 {
			chdr := r.Header.Get("Content-Length")
			contentLen, err := strconv.ParseInt(chdr, 10, 64)
			if err != nil {
				log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr))
				w.WriteHeader(400)
				return
			}
			truncated := contentLen / 3
			_, _ = io.CopyN(out, r.Body, truncated)
			r.Body.Close()
			copyErr = fmt.Errorf("Simulated copy error")
		} else {
			_, copyErr = io.Copy(out, r.Body)
		}
		if copyErr != nil {
			// Stash whatever arrived so a later PATCH can resume.
			b := buf.Bytes()
			if len(b) > 0 {
				debug(id, "Incomplete upload of %v, %d bytes", oid, len(b))
				largeObjects.SetIncomplete(repo, oid, b)
			}
			w.WriteHeader(500)
		} else {
			checkoid := hex.EncodeToString(hash.Sum(nil))
			if checkoid != oid {
				log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid))
				w.WriteHeader(403)
				return
			}

			b := buf.Bytes()
			largeObjects.Set(repo, oid, b)
			w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10))
			w.WriteHeader(204)
		}

	default:
		w.WriteHeader(405)
	}
}
945
946func validateTusHeaders(r *http.Request, id string) bool {
947	if len(r.Header.Get("Tus-Resumable")) == 0 {
948		debug(id, "Missing Tus-Resumable header in request")
949		return false
950	}
951	return true
952}
953
954func gitHandler(w http.ResponseWriter, r *http.Request) {
955	defer func() {
956		io.Copy(ioutil.Discard, r.Body)
957		r.Body.Close()
958	}()
959
960	cmd := exec.Command("git", "http-backend")
961	cmd.Env = []string{
962		fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir),
963		fmt.Sprintf("GIT_HTTP_EXPORT_ALL="),
964		fmt.Sprintf("PATH_INFO=%s", r.URL.Path),
965		fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery),
966		fmt.Sprintf("REQUEST_METHOD=%s", r.Method),
967		fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")),
968	}
969
970	buffer := &bytes.Buffer{}
971	cmd.Stdin = r.Body
972	cmd.Stdout = buffer
973	cmd.Stderr = os.Stderr
974
975	if err := cmd.Run(); err != nil {
976		log.Fatal(err)
977	}
978
979	text := textproto.NewReader(bufio.NewReader(buffer))
980
981	code, _, _ := text.ReadCodeLine(-1)
982
983	if code != 0 {
984		w.WriteHeader(code)
985	}
986
987	headers, _ := text.ReadMIMEHeader()
988	head := w.Header()
989	for key, values := range headers {
990		for _, value := range values {
991			head.Add(key, value)
992		}
993	}
994
995	io.Copy(w, text.R)
996}
997
998func redirect307Handler(w http.ResponseWriter, r *http.Request) {
999	id, ok := reqId(w)
1000	if !ok {
1001		return
1002	}
1003
1004	// Send a redirect to info/lfs
1005	// Make it either absolute or relative depending on subpath
1006	parts := strings.Split(r.URL.Path, "/")
1007	// first element is always blank since rooted
1008	var redirectTo string
1009	if parts[2] == "rel" {
1010		redirectTo = "/" + strings.Join(parts[3:], "/")
1011	} else if parts[2] == "abs" {
1012		redirectTo = server.URL + "/" + strings.Join(parts[3:], "/")
1013	} else if parts[2] == "objects" {
1014		repo := r.URL.Query().Get("r")
1015		redirectTo = server.URL + "/storage/" + strings.Join(parts[3:], "/") + "?r=" + repo
1016	} else {
1017		debug(id, "Invalid URL for redirect: %v", r.URL)
1018		w.WriteHeader(404)
1019		return
1020	}
1021	w.Header().Set("Location", redirectTo)
1022	w.WriteHeader(307)
1023}
1024
1025type User struct {
1026	Name string `json:"name"`
1027}
1028
1029type Lock struct {
1030	Id       string    `json:"id"`
1031	Path     string    `json:"path"`
1032	Owner    User      `json:"owner"`
1033	LockedAt time.Time `json:"locked_at"`
1034}
1035
1036type LockRequest struct {
1037	Path string `json:"path"`
1038	Ref  *Ref   `json:"ref,omitempty"`
1039}
1040
1041func (r *LockRequest) RefName() string {
1042	if r.Ref == nil {
1043		return ""
1044	}
1045	return r.Ref.Name
1046}
1047
1048type LockResponse struct {
1049	Lock    *Lock  `json:"lock"`
1050	Message string `json:"message,omitempty"`
1051}
1052
1053type UnlockRequest struct {
1054	Force bool `json:"force"`
1055	Ref   *Ref `json:"ref,omitempty"`
1056}
1057
1058func (r *UnlockRequest) RefName() string {
1059	if r.Ref == nil {
1060		return ""
1061	}
1062	return r.Ref.Name
1063}
1064
1065type UnlockResponse struct {
1066	Lock    *Lock  `json:"lock"`
1067	Message string `json:"message,omitempty"`
1068}
1069
1070type LockList struct {
1071	Locks      []Lock `json:"locks"`
1072	NextCursor string `json:"next_cursor,omitempty"`
1073	Message    string `json:"message,omitempty"`
1074}
1075
1076type Ref struct {
1077	Name string `json:"name,omitempty"`
1078}
1079
// VerifiableLockRequest is the body of a POST .../locks/verify request, with
// optional pagination via Cursor/Limit.
type VerifiableLockRequest struct {
	Ref    *Ref   `json:"ref,omitempty"`
	Cursor string `json:"cursor,omitempty"`
	Limit  int    `json:"limit,omitempty"`
}
1085
1086func (r *VerifiableLockRequest) RefName() string {
1087	if r.Ref == nil {
1088		return ""
1089	}
1090	return r.Ref.Name
1091}
1092
// VerifiableLockList is the response for .../locks/verify: locks partitioned
// into those owned by the requesting user (Ours) and everyone else's
// (Theirs), plus pagination and error details.
type VerifiableLockList struct {
	Ours       []Lock `json:"ours"`
	Theirs     []Lock `json:"theirs"`
	NextCursor string `json:"next_cursor,omitempty"`
	Message    string `json:"message,omitempty"`
}
1099
var (
	// lmu guards repoLocks; all access goes through the helpers below.
	lmu sync.RWMutex
	// repoLocks maps a repository name to its locks, kept sorted by LockedAt.
	repoLocks = map[string][]Lock{}
)
1104
1105func addLocks(repo string, l ...Lock) {
1106	lmu.Lock()
1107	defer lmu.Unlock()
1108	repoLocks[repo] = append(repoLocks[repo], l...)
1109	sort.Sort(LocksByCreatedAt(repoLocks[repo]))
1110}
1111
1112func getLocks(repo string) []Lock {
1113	lmu.RLock()
1114	defer lmu.RUnlock()
1115
1116	locks := repoLocks[repo]
1117	cp := make([]Lock, len(locks))
1118	for i, l := range locks {
1119		cp[i] = l
1120	}
1121
1122	return cp
1123}
1124
1125func getFilteredLocks(repo, path, cursor, limit string) ([]Lock, string, error) {
1126	locks := getLocks(repo)
1127	if cursor != "" {
1128		lastSeen := -1
1129		for i, l := range locks {
1130			if l.Id == cursor {
1131				lastSeen = i
1132				break
1133			}
1134		}
1135
1136		if lastSeen > -1 {
1137			locks = locks[lastSeen:]
1138		} else {
1139			return nil, "", fmt.Errorf("cursor (%s) not found", cursor)
1140		}
1141	}
1142
1143	if path != "" {
1144		var filtered []Lock
1145		for _, l := range locks {
1146			if l.Path == path {
1147				filtered = append(filtered, l)
1148			}
1149		}
1150
1151		locks = filtered
1152	}
1153
1154	if limit != "" {
1155		size, err := strconv.Atoi(limit)
1156		if err != nil {
1157			return nil, "", errors.New("unable to parse limit amount")
1158		}
1159
1160		size = int(math.Min(float64(len(locks)), 3))
1161		if size < 0 {
1162			return nil, "", nil
1163		}
1164
1165		if size+1 < len(locks) {
1166			return locks[:size], locks[size+1].Id, nil
1167		}
1168	}
1169
1170	return locks, "", nil
1171}
1172
1173func delLock(repo string, id string) *Lock {
1174	lmu.RLock()
1175	defer lmu.RUnlock()
1176
1177	var deleted *Lock
1178	locks := make([]Lock, 0, len(repoLocks[repo]))
1179	for _, l := range repoLocks[repo] {
1180		if l.Id == id {
1181			deleted = &l
1182			continue
1183		}
1184		locks = append(locks, l)
1185	}
1186	repoLocks[repo] = locks
1187	return deleted
1188}
1189
// LocksByCreatedAt implements sort.Interface, ordering locks by ascending
// LockedAt time.
type LocksByCreatedAt []Lock

func (c LocksByCreatedAt) Len() int           { return len(c) }
func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) }
func (c LocksByCreatedAt) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
1195
var (
	// lockRe matches paths ending in "/locks" (optionally with a trailing
	// slash), i.e. the list/create endpoints.
	lockRe = regexp.MustCompile(`/locks/?$`)
	// unlockRe captures the lock id from ".../locks/:id/unlock" paths.
	unlockRe = regexp.MustCompile(`locks/([^/]+)/unlock\z`)
)
1200
// locksHandler implements the Git LFS File Locking API for the test server:
// GET .../locks lists locks, POST .../locks creates one, POST
// .../locks/:id/unlock deletes one, and POST .../locks/verify partitions
// locks into "ours"/"theirs". Repositories with magic names (e.g. suffixes
// like "branch-required" or "verify-403") trigger special behavior so the
// integration tests can exercise error paths.
func locksHandler(w http.ResponseWriter, r *http.Request, repo string) {
	dec := json.NewDecoder(r.Body)
	enc := json.NewEncoder(w)

	// The "netrctest" repo rejects the deliberately-bad retry password so
	// tests can exercise netrc credential handling.
	if repo == "netrctest" {
		user, pass, err := extractAuth(r.Header.Get("Authorization"))
		if err != nil || (user == "netrcuser" && pass == "badpassretry") {
			writeLFSError(w, 401, "Error: Bad Auth")
			return
		}
	}

	switch r.Method {
	case "GET":
		// List locks. Only the ".../locks" path is valid for GET.
		if !lockRe.MatchString(r.URL.Path) {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusNotFound)
			w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`))
			return
		}

		if err := r.ParseForm(); err != nil {
			http.Error(w, "could not parse form values", http.StatusInternalServerError)
			return
		}

		// Repos named "...-<branch>-branch-required" demand that the request
		// carry refspec=refs/heads/<branch>.
		if strings.HasSuffix(repo, "branch-required") {
			parts := strings.Split(repo, "-")
			lenParts := len(parts)
			if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != r.FormValue("refspec") {
				w.WriteHeader(403)
				enc.Encode(struct {
					Message string `json:"message"`
				}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], r.FormValue("refspec"))})
				return
			}
		}

		ll := &LockList{}
		w.Header().Set("Content-Type", "application/json")
		locks, nextCursor, err := getFilteredLocks(repo,
			r.FormValue("path"),
			r.FormValue("cursor"),
			r.FormValue("limit"))

		// Errors are reported in-band via the JSON "message" field with a
		// 200 status, matching what the tests expect.
		if err != nil {
			ll.Message = err.Error()
		} else {
			ll.Locks = locks
			ll.NextCursor = nextCursor
		}

		enc.Encode(ll)
		return
	case "POST":
		w.Header().Set("Content-Type", "application/json")
		// Unlock: POST .../locks/:id/unlock.
		if strings.HasSuffix(r.URL.Path, "unlock") {
			var lockId string
			if matches := unlockRe.FindStringSubmatch(r.URL.Path); len(matches) > 1 {
				lockId = matches[1]
			}

			if len(lockId) == 0 {
				// NOTE(review): there is no return here, so after encoding
				// "Invalid lock" the handler falls through and responds a
				// second time below — confirm whether that is intentional.
				enc.Encode(&UnlockResponse{Message: "Invalid lock"})
			}

			unlockRequest := &UnlockRequest{}
			if err := dec.Decode(unlockRequest); err != nil {
				enc.Encode(&UnlockResponse{Message: err.Error()})
				return
			}

			if strings.HasSuffix(repo, "branch-required") {
				parts := strings.Split(repo, "-")
				lenParts := len(parts)
				if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != unlockRequest.RefName() {
					w.WriteHeader(403)
					enc.Encode(struct {
						Message string `json:"message"`
					}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], unlockRequest.RefName())})
					return
				}
			}

			if l := delLock(repo, lockId); l != nil {
				enc.Encode(&UnlockResponse{Lock: l})
			} else {
				enc.Encode(&UnlockResponse{Message: "unable to find lock"})
			}
			return
		}

		// Verify: POST .../locks/verify. Magic repo suffixes force specific
		// HTTP failure codes for the error-path tests.
		if strings.HasSuffix(r.URL.Path, "/locks/verify") {
			if strings.HasSuffix(repo, "verify-5xx") {
				w.WriteHeader(500)
				return
			}
			if strings.HasSuffix(repo, "verify-501") {
				w.WriteHeader(501)
				return
			}
			if strings.HasSuffix(repo, "verify-403") {
				w.WriteHeader(403)
				return
			}

			switch repo {
			case "pre_push_locks_verify_404":
				w.WriteHeader(http.StatusNotFound)
				w.Write([]byte(`{"message":"pre_push_locks_verify_404"}`))
				return
			case "pre_push_locks_verify_410":
				w.WriteHeader(http.StatusGone)
				w.Write([]byte(`{"message":"pre_push_locks_verify_410"}`))
				return
			}

			reqBody := &VerifiableLockRequest{}
			if err := dec.Decode(reqBody); err != nil {
				w.WriteHeader(http.StatusBadRequest)
				enc.Encode(struct {
					Message string `json:"message"`
				}{"json decode error: " + err.Error()})
				return
			}

			if strings.HasSuffix(repo, "branch-required") {
				parts := strings.Split(repo, "-")
				lenParts := len(parts)
				if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != reqBody.RefName() {
					w.WriteHeader(403)
					enc.Encode(struct {
						Message string `json:"message"`
					}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], reqBody.RefName())})
					return
				}
			}

			ll := &VerifiableLockList{}
			locks, nextCursor, err := getFilteredLocks(repo, "",
				reqBody.Cursor,
				strconv.Itoa(reqBody.Limit))
			if err != nil {
				ll.Message = err.Error()
			} else {
				ll.NextCursor = nextCursor

				// Locks whose path contains "theirs" are attributed to other
				// users; everything else counts as ours.
				for _, l := range locks {
					if strings.Contains(l.Path, "theirs") {
						ll.Theirs = append(ll.Theirs, l)
					} else {
						ll.Ours = append(ll.Ours, l)
					}
				}
			}

			enc.Encode(ll)
			return
		}

		// Create: POST .../locks.
		if strings.HasSuffix(r.URL.Path, "/locks") {
			lockRequest := &LockRequest{}
			if err := dec.Decode(lockRequest); err != nil {
				// NOTE(review): no return after the decode error, so lock
				// creation proceeds with a zero-valued request — confirm
				// whether that is intentional.
				enc.Encode(&LockResponse{Message: err.Error()})
			}

			if strings.HasSuffix(repo, "branch-required") {
				parts := strings.Split(repo, "-")
				lenParts := len(parts)
				if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != lockRequest.RefName() {
					w.WriteHeader(403)
					enc.Encode(struct {
						Message string `json:"message"`
					}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], lockRequest.RefName())})
					return
				}
			}

			// Reject duplicate locks on the same path.
			for _, l := range getLocks(repo) {
				if l.Path == lockRequest.Path {
					enc.Encode(&LockResponse{Message: "lock already created"})
					return
				}
			}

			// Random 20-byte id, hex-encoded.
			var id [20]byte
			rand.Read(id[:])

			lock := &Lock{
				Id:       fmt.Sprintf("%x", id[:]),
				Path:     lockRequest.Path,
				Owner:    User{Name: "Git LFS Tests"},
				LockedAt: time.Now(),
			}

			addLocks(repo, *lock)

			// TODO(taylor): commit_needed case
			// TODO(taylor): err case

			enc.Encode(&LockResponse{
				Lock: lock,
			})
			return
		}
	}

	http.NotFound(w, r)
}
1410
1411func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool {
1412	if !strings.HasPrefix(repo, "requirecreds") {
1413		return false
1414	}
1415
1416	auth := r.Header.Get("Authorization")
1417	if len(auth) == 0 {
1418		writeLFSError(w, 401, "Error: Authorization Required")
1419		return true
1420	}
1421
1422	user, pass, err := extractAuth(auth)
1423	if err != nil {
1424		writeLFSError(w, 403, err.Error())
1425		return true
1426	}
1427
1428	if user != "requirecreds" || pass != "pass" {
1429		writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass))
1430		return true
1431	}
1432
1433	return false
1434}
1435
1436func testingChunkedTransferEncoding(r *http.Request) bool {
1437	return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding")
1438}
1439
1440func testingTusUploadInBatchReq(r *http.Request) bool {
1441	return strings.HasPrefix(r.URL.String(), "/test-tus-upload")
1442}
1443func testingTusUploadInterruptedInBatchReq(r *http.Request) bool {
1444	return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt")
1445}
1446func testingCustomTransfer(r *http.Request) bool {
1447	return strings.HasPrefix(r.URL.String(), "/test-custom-transfer")
1448}
1449
// lfsUrlRE captures the leading repository component of an LFS API path such
// as "/myrepo/info/lfs/...".
var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`)

// repoFromLfsUrl returns the repository name embedded in an LFS API URL
// path, with any trailing ".git" stripped, or an error when the path does
// not look like an LFS URL.
func repoFromLfsUrl(urlpath string) (string, error) {
	m := lfsUrlRE.FindStringSubmatch(urlpath)
	if len(m) != 2 {
		return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE)
	}
	return strings.TrimSuffix(m[1], ".git"), nil
}
1464
// lfsStorage is an in-memory, mutex-guarded store of LFS object contents,
// keyed by repository and then OID. "incomplete" holds partial uploads used
// by the resume/retry tests.
type lfsStorage struct {
	objects    map[string]map[string][]byte
	incomplete map[string]map[string][]byte
	mutex      *sync.Mutex
}

// Get returns the stored contents for (repo, oid) and whether they exist.
func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	// Two-level index: a missing repo map yields (nil, false) directly.
	data, ok := s.objects[repo][oid]
	return data, ok
}

// Has reports whether contents are stored for (repo, oid).
func (s *lfsStorage) Has(repo, oid string) bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	_, ok := s.objects[repo][oid]
	return ok
}

// Set stores contents for (repo, oid), creating the repo's map on first use.
func (s *lfsStorage) Set(repo, oid string, by []byte) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.objects[repo] == nil {
		s.objects[repo] = make(map[string][]byte)
	}
	s.objects[repo][oid] = by
}

// Delete removes any stored contents for (repo, oid).
func (s *lfsStorage) Delete(repo, oid string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	// delete on a nil inner map is a no-op, so no existence check is needed.
	delete(s.objects[repo], oid)
}

// GetIncomplete returns the partial upload for (repo, oid), if any.
func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	data, ok := s.incomplete[repo][oid]
	return data, ok
}

// SetIncomplete stores a partial upload for (repo, oid).
func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.incomplete[repo] == nil {
		s.incomplete[repo] = make(map[string][]byte)
	}
	s.incomplete[repo][oid] = by
}

// DeleteIncomplete removes any partial upload for (repo, oid).
func (s *lfsStorage) DeleteIncomplete(repo, oid string) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.incomplete[repo], oid)
}

// newLfsStorage returns an empty, ready-to-use store.
func newLfsStorage() *lfsStorage {
	return &lfsStorage{
		objects:    make(map[string]map[string][]byte),
		incomplete: make(map[string]map[string][]byte),
		mutex:      &sync.Mutex{},
	}
}
1554
// extractAuth decodes a "Basic" Authorization header value into its username
// and password. It returns empty strings (and a nil error) for non-Basic
// schemes or payloads without a ":" separator, and the decode error for
// malformed base64.
func extractAuth(auth string) (string, string, error) {
	if !strings.HasPrefix(auth, "Basic ") {
		return "", "", nil
	}

	// Check the decode error before using its result (the previous version
	// converted the bytes first), and trim the prefix instead of slicing.
	decodeBy, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
	if err != nil {
		return "", "", err
	}

	// Split only on the first ":" — passwords may themselves contain colons.
	parts := strings.SplitN(string(decodeBy), ":", 2)
	if len(parts) == 2 {
		return parts[0], parts[1], nil
	}
	return "", "", nil
}
1573
1574func skipIfNoCookie(w http.ResponseWriter, r *http.Request, id string) bool {
1575	cookie := r.Header.Get("Cookie")
1576	if strings.Contains(cookie, "secret") {
1577		return false
1578	}
1579
1580	w.WriteHeader(403)
1581	debug(id, "No cookie received: %q", r.URL.Path)
1582	return true
1583}
1584
1585func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string) bool {
1586	auth := r.Header.Get("Authorization")
1587	if auth == "" {
1588		w.WriteHeader(401)
1589		return true
1590	}
1591
1592	user, pass, err := extractAuth(auth)
1593	if err != nil {
1594		w.WriteHeader(403)
1595		debug(id, "Error decoding auth: %s", err)
1596		return true
1597	}
1598
1599	switch user {
1600	case "user":
1601		if pass == "pass" {
1602			return false
1603		}
1604	case "netrcuser", "requirecreds":
1605		return false
1606	case "path":
1607		if strings.HasPrefix(r.URL.Path, "/"+pass) {
1608			return false
1609		}
1610		debug(id, "auth attempt against: %q", r.URL.Path)
1611	}
1612
1613	w.WriteHeader(403)
1614	debug(id, "Bad auth: %q", auth)
1615	return true
1616}
1617
1618func init() {
1619	oidHandlers = make(map[string]string)
1620	for _, content := range contentHandlers {
1621		h := sha256.New()
1622		h.Write([]byte(content))
1623		oidHandlers[hex.EncodeToString(h.Sum(nil))] = content
1624	}
1625}
1626
// debug logs a formatted message prefixed with the request id.
func debug(reqid, msg string, args ...interface{}) {
	all := append([]interface{}{reqid}, args...)
	log.Printf("[%s] "+msg+"\n", all...)
}
1635
// reqId generates a random UUID-shaped request id. On the (practically
// impossible) failure of the system random source it writes a 500 to w and
// returns false.
func reqId(w http.ResponseWriter) (string, bool) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		http.Error(w, "error generating id: "+err.Error(), 500)
		return "", false
	}
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true
}
1645
1646// https://ericchiang.github.io/post/go-tls/
1647func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) {
1648
1649	// generate a new key-pair
1650	rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
1651	if err != nil {
1652		log.Fatalf("generating random key: %v", err)
1653	}
1654
1655	rootCertTmpl, err := CertTemplate()
1656	if err != nil {
1657		log.Fatalf("creating cert template: %v", err)
1658	}
1659	// describe what the certificate will be used for
1660	rootCertTmpl.IsCA = true
1661	rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
1662	rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
1663	//	rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}
1664
1665	rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)
1666
1667	return
1668}
1669
// generateClientCertificates creates a client key pair signed by the given
// CA, returning the private key plus three PEM encodings: the client
// certificate, the plaintext private key, and the same key encrypted with
// the passphrase "pass". Any failure aborts the test server via log.Fatalf.
func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte, clientKeyEncPEM []byte) {

	// create a key-pair for the client
	clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatalf("generating random key: %v", err)
	}

	// create a template for the client
	clientCertTmpl, err1 := CertTemplate()
	if err1 != nil {
		log.Fatalf("creating cert template: %v", err1)
	}
	clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature
	clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}

	// the root cert signs the cert by again providing its private key
	_, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey)
	if err2 != nil {
		log.Fatalf("error creating cert: %v", err2)
	}

	// PKCS#1 DER form of the private key, used for both PEM encodings below.
	privKey := x509.MarshalPKCS1PrivateKey(clientKey)

	// encode and load the cert and private key for the client
	clientKeyPEM = pem.EncodeToMemory(&pem.Block{
		Type: "RSA PRIVATE KEY", Bytes: privKey,
	})

	// NOTE(review): x509.EncryptPEMBlock is deprecated (legacy, insecure PEM
	// encryption) — acceptable for test fixtures, but not for production use.
	clientKeyEnc, err := x509.EncryptPEMBlock(bytes.NewBuffer(privKey), "RSA PRIVATE KEY", privKey, ([]byte)("pass"), x509.PEMCipherAES128)
	if err != nil {
		log.Fatalf("creating encrypted private key: %v", err)
	}
	clientKeyEncPEM = pem.EncodeToMemory(clientKeyEnc)

	return
}
1707
// CertTemplate returns a minimal x509 certificate template with a random
// 128-bit serial number, a one-hour validity window starting now, and
// SHA256-with-RSA as the signature algorithm.
func CertTemplate() (*x509.Certificate, error) {
	// A real certificate authority would track serials; random is fine here.
	limit := new(big.Int).Lsh(big.NewInt(1), 128)
	serial, err := rand.Int(rand.Reader, limit)
	if err != nil {
		return nil, errors.New("failed to generate serial number: " + err.Error())
	}

	return &x509.Certificate{
		SerialNumber:          serial,
		Subject:               pkix.Name{Organization: []string{"Yhat, Inc."}},
		SignatureAlgorithm:    x509.SHA256WithRSA,
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour), // valid for an hour
		BasicConstraintsValid: true,
	}, nil
}
1727
// CreateCert signs template with parentPriv on behalf of parent, binding the
// public key pub. It returns the parsed certificate together with its PEM
// encoding.
func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
	cert *x509.Certificate, certPEM []byte, err error) {

	var certDER []byte
	certDER, err = x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
	if err != nil {
		return
	}

	// parse the resulting certificate so we can use it again
	cert, err = x509.ParseCertificate(certDER)
	if err != nil {
		return
	}

	// PEM encode the certificate (this is a standard TLS encoding)
	certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	return
}
1745