// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package bytes implements functions for the manipulation of byte slices.
// It is analogous to the facilities of the strings package.
package bytes

import (
	"internal/bytealg"
	"unicode"
	"unicode/utf8"
)

// Equal reports whether a and b
// are the same length and contain the same bytes.
// A nil argument is equivalent to an empty slice.
func Equal(a, b []byte) bool {
	// Neither cmd/compile nor gccgo allocates for these string conversions.
	return string(a) == string(b)
}

// Compare returns an integer comparing two byte slices lexicographically.
// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty slice.
func Compare(a, b []byte) int {
	return bytealg.Compare(a, b)
}
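
// equalCompareSketch is an illustrative usage sketch added in this edit, not
// part of the original source: Equal answers "same bytes?", while Compare also
// orders its operands.
func equalCompareSketch() (eq bool, ord int) {
	eq = Equal([]byte("Go"), []byte("Go"))  // true
	ord = Compare([]byte("a"), []byte("b")) // -1, since "a" sorts before "b"
	return eq, ord
}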

// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
func explode(s []byte, n int) [][]byte {
	if n <= 0 {
		n = len(s)
	}
	a := make([][]byte, n)
	var size int
	na := 0
	for len(s) > 0 {
		if na+1 >= n {
			a[na] = s
			na++
			break
		}
		_, size = utf8.DecodeRune(s)
		a[na] = s[0:size:size]
		s = s[size:]
		na++
	}
	return a[0:na]
}

// Count counts the number of non-overlapping instances of sep in s.
// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
	// special case
	if len(sep) == 0 {
		return utf8.RuneCount(s) + 1
	}
	if len(sep) == 1 {
		return bytealg.Count(s, sep[0])
	}
	n := 0
	for {
		i := Index(s, sep)
		if i == -1 {
			return n
		}
		n++
		s = s[i+len(sep):]
	}
}
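
// countSketch is an illustrative usage sketch added in this edit, not part of
// the original source: a non-empty separator counts matches, and an empty
// separator counts runes plus one.
func countSketch() (matches, runesPlusOne int) {
	matches = Count([]byte("cheese"), []byte("e"))   // 3
	runesPlusOne = Count([]byte("five"), []byte("")) // 5: 4 runes + 1
	return matches, runesPlusOne
}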

// Contains reports whether subslice is within b.
func Contains(b, subslice []byte) bool {
	return Index(b, subslice) != -1
}

// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
func ContainsAny(b []byte, chars string) bool {
	return IndexAny(b, chars) >= 0
}

// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
func ContainsRune(b []byte, r rune) bool {
	return IndexRune(b, r) >= 0
}

// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
func IndexByte(b []byte, c byte) int {
	return bytealg.IndexByte(b, c)
}

func indexBytePortable(s []byte, c byte) int {
	for i, b := range s {
		if b == c {
			return i
		}
	}
	return -1
}

// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
func LastIndex(s, sep []byte) int {
	n := len(sep)
	switch {
	case n == 0:
		return len(s)
	case n == 1:
		return LastIndexByte(s, sep[0])
	case n == len(s):
		if Equal(s, sep) {
			return 0
		}
		return -1
	case n > len(s):
		return -1
	}
	// Rabin-Karp search from the end of the string
	hashss, pow := bytealg.HashStrRevBytes(sep)
	last := len(s) - n
	var h uint32
	for i := len(s) - 1; i >= last; i-- {
		h = h*bytealg.PrimeRK + uint32(s[i])
	}
	if h == hashss && Equal(s[last:], sep) {
		return last
	}
	for i := last - 1; i >= 0; i-- {
		h *= bytealg.PrimeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i+n])
		if h == hashss && Equal(s[i:i+n], sep) {
			return i
		}
	}
	return -1
}

// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s []byte, c byte) int {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == c {
			return i
		}
	}
	return -1
}

// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
	switch {
	case 0 <= r && r < utf8.RuneSelf:
		return IndexByte(s, byte(r))
	case r == utf8.RuneError:
		for i := 0; i < len(s); {
			r1, n := utf8.DecodeRune(s[i:])
			if r1 == utf8.RuneError {
				return i
			}
			i += n
		}
		return -1
	case !utf8.ValidRune(r):
		return -1
	default:
		var b [utf8.UTFMax]byte
		n := utf8.EncodeRune(b[:], r)
		return Index(s, b[:n])
	}
}

// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of any of the Unicode
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) == 1 {
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			// search utf8.RuneError.
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		r := rune(chars[0])
		if r >= utf8.RuneSelf {
			r = utf8.RuneError
		}
		return IndexRune(s, r)
	}
	if len(s) > 8 {
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i, c := range s {
				if as.contains(c) {
					return i
				}
			}
			return -1
		}
	}
	var width int
	for i := 0; i < len(s); i += width {
		r := rune(s[i])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i]) >= 0 {
				return i
			}
			width = 1
			continue
		}
		r, width = utf8.DecodeRune(s[i:])
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == width {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= width {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}

// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
// points. It returns the byte index of the last occurrence in s of any of
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) > 8 {
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i := len(s) - 1; i >= 0; i-- {
				if as.contains(s[i]) {
					return i
				}
			}
			return -1
		}
	}
	if len(s) == 1 {
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		cr := rune(chars[0])
		if cr >= utf8.RuneSelf {
			cr = utf8.RuneError
		}
		for i := len(s); i > 0; {
			r, size := utf8.DecodeLastRune(s[:i])
			i -= size
			if r == cr {
				return i
			}
		}
		return -1
	}
	for i := len(s); i > 0; {
		r := rune(s[i-1])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
				return i - 1
			}
			i--
			continue
		}
		r, size := utf8.DecodeLastRune(s[:i])
		i -= size
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == size {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= size {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}

// Generic split: splits after each instance of sep,
// including sepSave bytes of sep in the subslices.
func genSplit(s, sep []byte, sepSave, n int) [][]byte {
	if n == 0 {
		return nil
	}
	if len(sep) == 0 {
		return explode(s, n)
	}
	if n < 0 {
		n = Count(s, sep) + 1
	}

	a := make([][]byte, n)
	n--
	i := 0
	for i < n {
		m := Index(s, sep)
		if m < 0 {
			break
		}
		a[i] = s[: m+sepSave : m+sepSave]
		s = s[m+len(sep):]
		i++
	}
	a[i] = s
	return a[:i+1]
}

// SplitN slices s into subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//   n == 0: the result is nil (zero subslices)
//   n < 0: all subslices
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }

// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//   n == 0: the result is nil (zero subslices)
//   n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
	return genSplit(s, sep, len(sep), n)
}

// Split slices s into all subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }

// SplitAfter slices s into all subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfter splits after each UTF-8 sequence.
// It is equivalent to SplitAfterN with a count of -1.
func SplitAfter(s, sep []byte) [][]byte {
	return genSplit(s, sep, len(sep), -1)
}
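
// splitSketch is an illustrative usage sketch added in this edit, not part of
// the original source: it contrasts Split, SplitN, and SplitAfter on the same
// input.
func splitSketch() (all, limited, after [][]byte) {
	all = Split([]byte("a,b,c"), []byte(","))         // ["a" "b" "c"]
	limited = SplitN([]byte("a,b,c"), []byte(","), 2) // ["a" "b,c"]
	after = SplitAfter([]byte("a,b,c"), []byte(","))  // ["a," "b," "c"]
	return all, limited, after
}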

var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}

// Fields interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s around each instance of one or more consecutive white space
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
	// First count the fields.
	// This is an exact count if s is ASCII, otherwise it is an approximation.
	n := 0
	wasSpace := 1
	// setBits is used to track which bits are set in the bytes of s.
	setBits := uint8(0)
	for i := 0; i < len(s); i++ {
		r := s[i]
		setBits |= r
		isSpace := int(asciiSpace[r])
		n += wasSpace & ^isSpace
		wasSpace = isSpace
	}

	if setBits >= utf8.RuneSelf {
		// Some runes in the input slice are not ASCII.
		return FieldsFunc(s, unicode.IsSpace)
	}

	// ASCII fast path
	a := make([][]byte, n)
	na := 0
	fieldStart := 0
	i := 0
	// Skip spaces in the front of the input.
	for i < len(s) && asciiSpace[s[i]] != 0 {
		i++
	}
	fieldStart = i
	for i < len(s) {
		if asciiSpace[s[i]] == 0 {
			i++
			continue
		}
		a[na] = s[fieldStart:i:i]
		na++
		i++
		// Skip spaces in between fields.
		for i < len(s) && asciiSpace[s[i]] != 0 {
			i++
		}
		fieldStart = i
	}
	if fieldStart < len(s) { // Last field might end at EOF.
		a[na] = s[fieldStart:len(s):len(s)]
	}
	return a
}
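
// fieldsSketch is an illustrative usage sketch added in this edit, not part of
// the original source: leading, trailing, and repeated white space produce no
// empty fields.
func fieldsSketch() [][]byte {
	return Fields([]byte("  foo bar  baz   ")) // ["foo" "bar" "baz"]
}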

// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
// and assumes that f always returns the same value for a given c.
func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
	// A span is used to record a slice of s of the form s[start:end].
	// The start index is inclusive and the end index is exclusive.
	type span struct {
		start int
		end   int
	}
	spans := make([]span, 0, 32)

	// Find the field start and end indices.
	// Doing this in a separate pass (rather than slicing the string s
	// and collecting the result substrings right away) is significantly
	// more efficient, possibly due to cache effects.
	start := -1 // valid span start if >= 0
	for i := 0; i < len(s); {
		size := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeRune(s[i:])
		}
		if f(r) {
			if start >= 0 {
				spans = append(spans, span{start, i})
				start = -1
			}
		} else {
			if start < 0 {
				start = i
			}
		}
		i += size
	}

	// Last field might end at EOF.
	if start >= 0 {
		spans = append(spans, span{start, len(s)})
	}

	// Create subslices from recorded field indices.
	a := make([][]byte, len(spans))
	for i, span := range spans {
		a[i] = s[span.start:span.end:span.end]
	}

	return a
}

// Join concatenates the elements of s to create a new byte slice. The separator
// sep is placed between elements in the resulting slice.
func Join(s [][]byte, sep []byte) []byte {
	if len(s) == 0 {
		return []byte{}
	}
	if len(s) == 1 {
		// Just return a copy.
		return append([]byte(nil), s[0]...)
	}
	n := len(sep) * (len(s) - 1)
	for _, v := range s {
		n += len(v)
	}

	b := make([]byte, n)
	bp := copy(b, s[0])
	for _, v := range s[1:] {
		bp += copy(b[bp:], sep)
		bp += copy(b[bp:], v)
	}
	return b
}
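
// joinSketch is an illustrative usage sketch added in this edit, not part of
// the original source: the separator appears between elements only, never at
// the ends.
func joinSketch() []byte {
	elems := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
	return Join(elems, []byte(", ")) // "foo, bar, baz"
}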

// HasPrefix tests whether the byte slice s begins with prefix.
func HasPrefix(s, prefix []byte) bool {
	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}

// HasSuffix tests whether the byte slice s ends with suffix.
func HasSuffix(s, suffix []byte) bool {
	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}

// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the byte slice with no replacement. The characters in s and the
// output are interpreted as UTF-8-encoded code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
	// In the worst case, the slice can grow when mapped, making
	// things unpleasant. But it's so rare we barge in assuming it's
	// fine. It could also shrink but that falls out naturally.
	maxbytes := len(s) // length of b
	nbytes := 0        // number of bytes encoded in b
	b := make([]byte, maxbytes)
	for i := 0; i < len(s); {
		wid := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, wid = utf8.DecodeRune(s[i:])
		}
		r = mapping(r)
		if r >= 0 {
			rl := utf8.RuneLen(r)
			if rl < 0 {
				rl = len(string(utf8.RuneError))
			}
			if nbytes+rl > maxbytes {
				// Grow the buffer.
				maxbytes = maxbytes*2 + utf8.UTFMax
				nb := make([]byte, maxbytes)
				copy(nb, b[0:nbytes])
				b = nb
			}
			nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
		}
		i += wid
	}
	return b[0:nbytes]
}
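
// mapSketch is an illustrative usage sketch added in this edit, not part of
// the original source: returning a negative rune from the mapping drops the
// character, so this strips all Unicode white space.
func mapSketch() []byte {
	return Map(func(r rune) rune {
		if unicode.IsSpace(r) {
			return -1 // drop the rune
		}
		return r
	}, []byte("a b\tc")) // "abc"
}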

// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if
// the result of (len(b) * count) overflows.
func Repeat(b []byte, count int) []byte {
	if count == 0 {
		return []byte{}
	}
	// Since we cannot return an error on overflow,
	// we should panic if the repeat will generate
	// an overflow.
	// See Issue golang.org/issue/16237.
	if count < 0 {
		panic("bytes: negative Repeat count")
	} else if len(b)*count/count != len(b) {
		panic("bytes: Repeat count causes overflow")
	}

	nb := make([]byte, len(b)*count)
	bp := copy(nb, b)
	for bp < len(nb) {
		copy(nb[bp:], nb[:bp])
		bp *= 2
	}
	return nb
}
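
// repeatSketch is an illustrative usage sketch added in this edit, not part of
// the original source: the result is count back-to-back copies of b.
func repeatSketch() []byte {
	return Repeat([]byte("na"), 4) // "nananana"
}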

// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
// their upper case.
func ToUpper(s []byte) []byte {
	isASCII, hasLower := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasLower = hasLower || ('a' <= c && c <= 'z')
	}

	if isASCII { // optimize for ASCII-only byte slices.
		if !hasLower {
			// Just return a copy.
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToUpper, s)
}

// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
// their lower case.
func ToLower(s []byte) []byte {
	isASCII, hasUpper := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasUpper = hasUpper || ('A' <= c && c <= 'Z')
	}

	if isASCII { // optimize for ASCII-only byte slices.
		if !hasUpper {
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToLower, s)
}

// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }

// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// upper case, giving priority to the special casing rules.
func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToUpper, s)
}

// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// lower case, giving priority to the special casing rules.
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToLower, s)
}

// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToTitle, s)
}

// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
// representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
func ToValidUTF8(s, replacement []byte) []byte {
	b := make([]byte, 0, len(s)+len(replacement))
	invalid := false // previous byte was from an invalid UTF-8 sequence
	for i := 0; i < len(s); {
		c := s[i]
		if c < utf8.RuneSelf {
			i++
			invalid = false
			b = append(b, c)
			continue
		}
		_, wid := utf8.DecodeRune(s[i:])
		if wid == 1 {
			i++
			if !invalid {
				invalid = true
				b = append(b, replacement...)
			}
			continue
		}
		invalid = false
		b = append(b, s[i:i+wid]...)
		i += wid
	}
	return b
}

// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(r rune) bool {
	// ASCII alphanumerics and underscore are not separators
	if r <= 0x7F {
		switch {
		case '0' <= r && r <= '9':
			return false
		case 'a' <= r && r <= 'z':
			return false
		case 'A' <= r && r <= 'Z':
			return false
		case r == '_':
			return false
		}
		return true
	}
	// Letters and digits are not separators
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return false
	}
	// Otherwise, all we can do for now is treat spaces as separators.
	return unicode.IsSpace(r)
}

// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
// words mapped to their title case.
//
// Deprecated: The rule Title uses for word boundaries does not handle Unicode
// punctuation properly. Use golang.org/x/text/cases instead.
func Title(s []byte) []byte {
	// Use a closure here to remember state.
	// Hackish but effective. Depends on Map scanning in order and calling
	// the closure once per rune.
	prev := ' '
	return Map(
		func(r rune) rune {
			if isSeparator(prev) {
				prev = r
				return unicode.ToTitle(r)
			}
			prev = r
			return r
		},
		s)
}

// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
// all leading UTF-8-encoded code points c that satisfy f(c).
func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
	i := indexFunc(s, f, false)
	if i == -1 {
		return nil
	}
	return s[i:]
}

// TrimRightFunc returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
	i := lastIndexFunc(s, f, false)
	if i >= 0 && s[i] >= utf8.RuneSelf {
		_, wid := utf8.DecodeRune(s[i:])
		i += wid
	} else {
		i++
	}
	return s[0:i]
}

// TrimFunc returns a subslice of s by slicing off all leading and trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimFunc(s []byte, f func(r rune) bool) []byte {
	return TrimRightFunc(TrimLeftFunc(s, f), f)
}

// TrimPrefix returns s without the provided leading prefix string.
// If s doesn't start with prefix, s is returned unchanged.
func TrimPrefix(s, prefix []byte) []byte {
	if HasPrefix(s, prefix) {
		return s[len(prefix):]
	}
	return s
}

// TrimSuffix returns s without the provided trailing suffix string.
// If s doesn't end with suffix, s is returned unchanged.
func TrimSuffix(s, suffix []byte) []byte {
	if HasSuffix(s, suffix) {
		return s[:len(s)-len(suffix)]
	}
	return s
}

// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s []byte, f func(r rune) bool) int {
	return indexFunc(s, f, true)
}

// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the last Unicode
// code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s []byte, f func(r rune) bool) int {
	return lastIndexFunc(s, f, true)
}

// indexFunc is the same as IndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
	start := 0
	for start < len(s) {
		wid := 1
		r := rune(s[start])
		if r >= utf8.RuneSelf {
			r, wid = utf8.DecodeRune(s[start:])
		}
		if f(r) == truth {
			return start
		}
		start += wid
	}
	return -1
}

// lastIndexFunc is the same as LastIndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
	for i := len(s); i > 0; {
		r, size := rune(s[i-1]), 1
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeLastRune(s[0:i])
		}
		i -= size
		if f(r) == truth {
			return i
		}
	}
	return -1
}

// asciiSet is a 32-byte value, where each bit represents the presence of a
// given ASCII character in the set. The 128-bits of the lower 16 bytes,
// starting with the least-significant bit of the lowest word to the
// most-significant bit of the highest word, map to the full range of all
// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
// This allocates a total of 32 bytes even though the upper half
// is unused to avoid bounds checks in asciiSet.contains.
type asciiSet [8]uint32

// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
	for i := 0; i < len(chars); i++ {
		c := chars[i]
		if c >= utf8.RuneSelf {
			return as, false
		}
		as[c/32] |= 1 << (c % 32)
	}
	return as, true
}

// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
	return (as[c/32] & (1 << (c % 32))) != 0
}

// containsRune is a simplified version of strings.ContainsRune
// to avoid importing the strings package.
// We avoid bytes.ContainsRune to avoid allocating a temporary copy of s.
func containsRune(s string, r rune) bool {
	for _, c := range s {
		if c == r {
			return true
		}
	}
	return false
}

// Trim returns a subslice of s by slicing off all leading and
// trailing UTF-8-encoded code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
	if len(s) == 0 || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimLeftASCII(trimRightASCII(s, &as), &as)
	}
	return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
}
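
// trimSketch is an illustrative usage sketch added in this edit, not part of
// the original source: cutset is a set of code points, not a prefix or suffix,
// so any mix of '!' and ' ' is removed from both ends.
func trimSketch() []byte {
	return Trim([]byte(" !!! Achtung! Achtung! !!! "), "! ") // "Achtung! Achtung"
}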

// TrimLeft returns a subslice of s by slicing off all leading
// UTF-8-encoded code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
	if len(s) == 0 || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(s, cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimLeftASCII(s, &as)
	}
	return trimLeftUnicode(s, cutset)
}

func trimLeftByte(s []byte, c byte) []byte {
	for len(s) > 0 && s[0] == c {
		s = s[1:]
	}
	return s
}

func trimLeftASCII(s []byte, as *asciiSet) []byte {
	for len(s) > 0 {
		if !as.contains(s[0]) {
			break
		}
		s = s[1:]
	}
	return s
}

func trimLeftUnicode(s []byte, cutset string) []byte {
	for len(s) > 0 {
		r, n := rune(s[0]), 1
		if r >= utf8.RuneSelf {
			r, n = utf8.DecodeRune(s)
		}
		if !containsRune(cutset, r) {
			break
		}
		s = s[n:]
	}
	return s
}

// TrimRight returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
	if len(s) == 0 || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimRightByte(s, cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimRightASCII(s, &as)
	}
	return trimRightUnicode(s, cutset)
}

func trimRightByte(s []byte, c byte) []byte {
	for len(s) > 0 && s[len(s)-1] == c {
		s = s[:len(s)-1]
	}
	return s
}

func trimRightASCII(s []byte, as *asciiSet) []byte {
	for len(s) > 0 {
		if !as.contains(s[len(s)-1]) {
			break
		}
		s = s[:len(s)-1]
	}
	return s
}

func trimRightUnicode(s []byte, cutset string) []byte {
	for len(s) > 0 {
		r, n := rune(s[len(s)-1]), 1
		if r >= utf8.RuneSelf {
			r, n = utf8.DecodeLastRune(s)
		}
		if !containsRune(cutset, r) {
			break
		}
		s = s[:len(s)-n]
	}
	return s
}

// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {
	// Fast path for ASCII: look for the first ASCII non-space byte
	start := 0
	for ; start < len(s); start++ {
		c := s[start]
		if c >= utf8.RuneSelf {
			// If we run into a non-ASCII byte, fall back to the
			// slower unicode-aware method on the remaining bytes
			return TrimFunc(s[start:], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}

	// Now look for the first ASCII non-space byte from the end
	stop := len(s)
	for ; stop > start; stop-- {
		c := s[stop-1]
		if c >= utf8.RuneSelf {
			return TrimFunc(s[start:stop], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}

	// At this point s[start:stop] starts and ends with ASCII
	// non-space bytes, so we're done. Non-ASCII cases have already
	// been handled above.
	if start == stop {
		// Special case to preserve previous TrimLeftFunc behavior,
		// returning nil instead of empty slice if all spaces.
		return nil
	}
	return s[start:stop]
}

// Runes interprets s as a sequence of UTF-8-encoded code points.
// It returns a slice of runes (Unicode code points) equivalent to s.
func Runes(s []byte) []rune {
	t := make([]rune, utf8.RuneCount(s))
	i := 0
	for len(s) > 0 {
		r, l := utf8.DecodeRune(s)
		t[i] = r
		i++
		s = s[l:]
	}
	return t
}

// Replace returns a copy of the slice s with the first n
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
// If n < 0, there is no limit on the number of replacements.
func Replace(s, old, new []byte, n int) []byte {
	m := 0
	if n != 0 {
		// Compute number of replacements.
		m = Count(s, old)
	}
	if m == 0 {
		// Just return a copy.
		return append([]byte(nil), s...)
	}
	if n < 0 || m < n {
		n = m
	}

	// Apply replacements to buffer.
	t := make([]byte, len(s)+n*(len(new)-len(old)))
	w := 0
	start := 0
	for i := 0; i < n; i++ {
		j := start
		if len(old) == 0 {
			if i > 0 {
				_, wid := utf8.DecodeRune(s[start:])
				j += wid
			}
		} else {
			j += Index(s[start:], old)
		}
		w += copy(t[w:], s[start:j])
		w += copy(t[w:], new)
		start = j + len(old)
	}
	w += copy(t[w:], s[start:])
	return t[0:w]
}
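
// replaceSketch is an illustrative usage sketch added in this edit, not part
// of the original source: n limits the number of replacements, and n < 0 means
// replace all, which is what ReplaceAll does.
func replaceSketch() (limited, all []byte) {
	limited = Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2) // "oinky oinky oink"
	all = ReplaceAll([]byte("oink oink oink"), []byte("o"), []byte("0"))      // "0ink 0ink 0ink"
	return limited, all
}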

// ReplaceAll returns a copy of the slice s with all
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
func ReplaceAll(s, old, new []byte) []byte {
	return Replace(s, old, new, -1)
}

// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under Unicode case-folding, which is a more general
// form of case-insensitivity.
func EqualFold(s, t []byte) bool {
	for len(s) != 0 && len(t) != 0 {
		// Extract first rune from each.
		var sr, tr rune
		if s[0] < utf8.RuneSelf {
			sr, s = rune(s[0]), s[1:]
		} else {
			r, size := utf8.DecodeRune(s)
			sr, s = r, s[size:]
		}
		if t[0] < utf8.RuneSelf {
			tr, t = rune(t[0]), t[1:]
		} else {
			r, size := utf8.DecodeRune(t)
			tr, t = r, t[size:]
		}

		// If they match, keep going; if not, return false.

		// Easy case.
		if tr == sr {
			continue
		}

		// Make sr < tr to simplify what follows.
		if tr < sr {
			tr, sr = sr, tr
		}
		// Fast check for ASCII.
		if tr < utf8.RuneSelf {
			// ASCII only, sr/tr must be upper/lower case
			if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
				continue
			}
			return false
		}

		// General case. SimpleFold(x) returns the next equivalent rune > x
		// or wraps around to smaller values.
		r := unicode.SimpleFold(sr)
		for r != sr && r < tr {
			r = unicode.SimpleFold(r)
		}
		if r == tr {
			continue
		}
		return false
	}

	// One string is empty. Are both?
	return len(s) == len(t)
}
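
// equalFoldSketch is an illustrative usage sketch added in this edit, not part
// of the original source: folding is more general than ASCII case mapping, so
// "Go" and "GO" compare equal without allocating.
func equalFoldSketch() bool {
	return EqualFold([]byte("Go"), []byte("GO")) // true
}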

// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
	n := len(sep)
	switch {
	case n == 0:
		return 0
	case n == 1:
		return IndexByte(s, sep[0])
	case n == len(s):
		if Equal(sep, s) {
			return 0
		}
		return -1
	case n > len(s):
		return -1
	case n <= bytealg.MaxLen:
		// Use brute force when s and sep both are small
		if len(s) <= bytealg.MaxBruteForce {
			return bytealg.Index(s, sep)
		}
		c0 := sep[0]
		c1 := sep[1]
		i := 0
		t := len(s) - n + 1
		fails := 0
		for i < t {
			if s[i] != c0 {
				// IndexByte is faster than bytealg.Index, so use it as long as
				// we're not getting lots of false positives.
				o := IndexByte(s[i+1:t], c0)
				if o < 0 {
					return -1
				}
				i += o + 1
			}
			if s[i+1] == c1 && Equal(s[i:i+n], sep) {
				return i
			}
			fails++
			i++
			// Switch to bytealg.Index when IndexByte produces too many false positives.
			if fails > bytealg.Cutover(i) {
				r := bytealg.Index(s[i:], sep)
				if r >= 0 {
					return r + i
				}
				return -1
			}
		}
		return -1
	}
	c0 := sep[0]
	c1 := sep[1]
	i := 0
	fails := 0
	t := len(s) - n + 1
	for i < t {
		if s[i] != c0 {
			o := IndexByte(s[i+1:t], c0)
			if o < 0 {
				break
			}
			i += o + 1
		}
		if s[i+1] == c1 && Equal(s[i:i+n], sep) {
			return i
		}
		i++
		fails++
		if fails >= 4+i>>4 && i < t {
			// Give up on IndexByte, it isn't skipping ahead
			// far enough to be better than Rabin-Karp.
			// Experiments (using IndexPeriodic) suggest
			// the cutover is about 16 byte skips.
			// TODO: if large prefixes of sep are matching
			// we should cutover at even larger average skips,
			// because Equal becomes that much more expensive.
			// This code does not take that effect into account.
			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
			if j < 0 {
				return -1
			}
			return i + j
		}
	}
	return -1
}

// Cut slices s around the first instance of sep,
// returning the text before and after sep.
// The found result reports whether sep appears in s.
// If sep does not appear in s, Cut returns s, nil, false.
//
// Cut returns slices of the original slice s, not copies.
func Cut(s, sep []byte) (before, after []byte, found bool) {
	if i := Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

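// cutSketch is an illustrative usage sketch added in this edit, not part of
// the original source: Cut splits around the first separator and reports
// whether it was found, returning subslices rather than copies.
func cutSketch() (key, value []byte, found bool) {
	return Cut([]byte("key=value"), []byte("=")) // "key", "value", true
}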