bytes.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package bytes implements functions for the manipulation of byte slices.
// It is analogous to the facilities of the strings package.
package bytes

import (
	"internal/bytealg"
	"unicode"
	"unicode/utf8"
)

// Equal reports whether a and b
// are the same length and contain the same bytes.
// A nil argument is equivalent to an empty slice.
func Equal(a, b []byte) bool {
	// Neither cmd/compile nor gccgo allocates for these string conversions.
	return string(a) == string(b)
}

// Compare returns an integer comparing two byte slices lexicographically.
// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty slice.
func Compare(a, b []byte) int {
	return bytealg.Compare(a, b)
}
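
// sketchEqualCompare is an editor-added illustrative sketch, not part of the
// upstream source: it shows that Compare returns 0 exactly when Equal reports
// true, and that a nil slice compares equal to an empty one.
func sketchEqualCompare() (bool, int) {
	a := []byte("gopher")
	var b []byte // nil
	return Equal(a, []byte("gopher")), Compare(b, []byte{}) // true, 0
}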

// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
func explode(s []byte, n int) [][]byte {
	if n <= 0 || n > len(s) {
		n = len(s)
	}
	a := make([][]byte, n)
	var size int
	na := 0
	for len(s) > 0 {
		if na+1 >= n {
			a[na] = s
			na++
			break
		}
		_, size = utf8.DecodeRune(s)
		a[na] = s[0:size:size]
		s = s[size:]
		na++
	}
	return a[0:na]
}

// Count counts the number of non-overlapping instances of sep in s.
// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
	// special case
	if len(sep) == 0 {
		return utf8.RuneCount(s) + 1
	}
	if len(sep) == 1 {
		return bytealg.Count(s, sep[0])
	}
	n := 0
	for {
		i := Index(s, sep)
		if i == -1 {
			return n
		}
		n++
		s = s[i+len(sep):]
	}
}
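
// sketchCount is an editor-added illustrative sketch, not part of the upstream
// source. With a non-empty separator Count reports non-overlapping matches;
// with an empty separator it reports one more than the number of runes in s.
func sketchCount() (int, int) {
	return Count([]byte("cheese"), []byte("e")), // 3
		Count([]byte("go"), nil) // 3 = 2 runes + 1
}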

// Contains reports whether subslice is within b.
func Contains(b, subslice []byte) bool {
	return Index(b, subslice) != -1
}

// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
func ContainsAny(b []byte, chars string) bool {
	return IndexAny(b, chars) >= 0
}

// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
func ContainsRune(b []byte, r rune) bool {
	return IndexRune(b, r) >= 0
}

// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
func IndexByte(b []byte, c byte) int {
	return bytealg.IndexByte(b, c)
}

// indexBytePortable is a simple, portable byte-by-byte implementation of
// IndexByte, kept alongside the optimized bytealg version.
func indexBytePortable(s []byte, c byte) int {
	for i, b := range s {
		if b == c {
			return i
		}
	}
	return -1
}
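
// sketchContains is an editor-added illustrative sketch, not part of the
// upstream source: Contains is a convenience wrapper around Index, and
// IndexByte reports the offset of a single byte.
func sketchContains() (bool, int) {
	return Contains([]byte("seafood"), []byte("foo")), // true
		IndexByte([]byte("golang"), 'l') // 2
}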

// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
func LastIndex(s, sep []byte) int {
	n := len(sep)
	switch {
	case n == 0:
		return len(s)
	case n == 1:
		return LastIndexByte(s, sep[0])
	case n == len(s):
		if Equal(s, sep) {
			return 0
		}
		return -1
	case n > len(s):
		return -1
	}
	// Rabin-Karp search from the end of the string
	hashss, pow := bytealg.HashStrRevBytes(sep)
	last := len(s) - n
	var h uint32
	for i := len(s) - 1; i >= last; i-- {
		h = h*bytealg.PrimeRK + uint32(s[i])
	}
	if h == hashss && Equal(s[last:], sep) {
		return last
	}
	for i := last - 1; i >= 0; i-- {
		h *= bytealg.PrimeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i+n])
		if h == hashss && Equal(s[i:i+n], sep) {
			return i
		}
	}
	return -1
}

// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s []byte, c byte) int {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == c {
			return i
		}
	}
	return -1
}

// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if r is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
	switch {
	case 0 <= r && r < utf8.RuneSelf:
		return IndexByte(s, byte(r))
	case r == utf8.RuneError:
		for i := 0; i < len(s); {
			r1, n := utf8.DecodeRune(s[i:])
			if r1 == utf8.RuneError {
				return i
			}
			i += n
		}
		return -1
	case !utf8.ValidRune(r):
		return -1
	default:
		var b [utf8.UTFMax]byte
		n := utf8.EncodeRune(b[:], r)
		return Index(s, b[:n])
	}
}
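
// sketchIndexRune is an editor-added illustrative sketch, not part of the
// upstream source: IndexRune locates the UTF-8 encoding of a rune, while
// LastIndex reports the final occurrence of a multi-byte separator.
func sketchIndexRune() (int, int) {
	return IndexRune([]byte("go gopher"), 'p'), // 5
		LastIndex([]byte("go gopher"), []byte("go")) // 3
}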

// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of any of the Unicode
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) == 1 {
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			// search utf8.RuneError.
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		r := rune(chars[0])
		if r >= utf8.RuneSelf {
			r = utf8.RuneError
		}
		return IndexRune(s, r)
	}
	if len(s) > 8 {
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i, c := range s {
				if as.contains(c) {
					return i
				}
			}
			return -1
		}
	}
	var width int
	for i := 0; i < len(s); i += width {
		r := rune(s[i])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i]) >= 0 {
				return i
			}
			width = 1
			continue
		}
		r, width = utf8.DecodeRune(s[i:])
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == width {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= width {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}

// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
// points. It returns the byte index of the last occurrence in s of any of
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) > 8 {
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i := len(s) - 1; i >= 0; i-- {
				if as.contains(s[i]) {
					return i
				}
			}
			return -1
		}
	}
	if len(s) == 1 {
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		cr := rune(chars[0])
		if cr >= utf8.RuneSelf {
			cr = utf8.RuneError
		}
		for i := len(s); i > 0; {
			r, size := utf8.DecodeLastRune(s[:i])
			i -= size
			if r == cr {
				return i
			}
		}
		return -1
	}
	for i := len(s); i > 0; {
		r := rune(s[i-1])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
				return i - 1
			}
			i--
			continue
		}
		r, size := utf8.DecodeLastRune(s[:i])
		i -= size
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == size {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= size {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}
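
// sketchIndexAny is an editor-added illustrative sketch, not part of the
// upstream source: IndexAny and LastIndexAny search for any code point from
// chars, scanning from the front and the back respectively.
func sketchIndexAny() (int, int) {
	s := []byte("gopher")
	return IndexAny(s, "aeiou"), LastIndexAny(s, "aeiou") // 1, 4
}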

// Generic split: splits after each instance of sep,
// including sepSave bytes of sep in the subslices.
func genSplit(s, sep []byte, sepSave, n int) [][]byte {
	if n == 0 {
		return nil
	}
	if len(sep) == 0 {
		return explode(s, n)
	}
	if n < 0 {
		n = Count(s, sep) + 1
	}
	if n > len(s)+1 {
		n = len(s) + 1
	}

	a := make([][]byte, n)
	n--
	i := 0
	for i < n {
		m := Index(s, sep)
		if m < 0 {
			break
		}
		a[i] = s[: m+sepSave : m+sepSave]
		s = s[m+len(sep):]
		i++
	}
	a[i] = s
	return a[:i+1]
}

// SplitN slices s into subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//
//	n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//	n == 0: the result is nil (zero subslices)
//	n < 0: all subslices
//
// To split around the first instance of a separator, see Cut.
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }

// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//
//	n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//	n == 0: the result is nil (zero subslices)
//	n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
	return genSplit(s, sep, len(sep), n)
}

// Split slices s into all subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
//
// To split around the first instance of a separator, see Cut.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }

// SplitAfter slices s into all subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfter splits after each UTF-8 sequence.
// It is equivalent to SplitAfterN with a count of -1.
func SplitAfter(s, sep []byte) [][]byte {
	return genSplit(s, sep, len(sep), -1)
}
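
// sketchSplit is an editor-added illustrative sketch, not part of the upstream
// source: Split drops the separator, SplitAfter keeps it, and SplitN caps the
// number of subslices.
func sketchSplit() (int, int, int) {
	csv := []byte("a,b,c")
	return len(Split(csv, []byte(","))), // 3: ["a" "b" "c"]
		len(SplitAfter(csv, []byte(","))), // 3: ["a," "b," "c"]
		len(SplitN(csv, []byte(","), 2)) // 2: ["a" "b,c"]
}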

var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}

// Fields interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s around each instance of one or more consecutive white space
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
	// First count the fields.
	// This is an exact count if s is ASCII, otherwise it is an approximation.
	n := 0
	wasSpace := 1
	// setBits is used to track which bits are set in the bytes of s.
	setBits := uint8(0)
	for i := 0; i < len(s); i++ {
		r := s[i]
		setBits |= r
		isSpace := int(asciiSpace[r])
		n += wasSpace & ^isSpace
		wasSpace = isSpace
	}

	if setBits >= utf8.RuneSelf {
		// Some runes in the input slice are not ASCII.
		return FieldsFunc(s, unicode.IsSpace)
	}

	// ASCII fast path
	a := make([][]byte, n)
	na := 0
	fieldStart := 0
	i := 0
	// Skip spaces in the front of the input.
	for i < len(s) && asciiSpace[s[i]] != 0 {
		i++
	}
	fieldStart = i
	for i < len(s) {
		if asciiSpace[s[i]] == 0 {
			i++
			continue
		}
		a[na] = s[fieldStart:i:i]
		na++
		i++
		// Skip spaces in between fields.
		for i < len(s) && asciiSpace[s[i]] != 0 {
			i++
		}
		fieldStart = i
	}
	if fieldStart < len(s) { // Last field might end at EOF.
		a[na] = s[fieldStart:len(s):len(s)]
	}
	return a
}
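
// sketchFields is an editor-added illustrative sketch, not part of the
// upstream source: Fields collapses runs of whitespace, so leading, trailing,
// and repeated spaces produce no empty fields.
func sketchFields() int {
	return len(Fields([]byte("  foo bar  baz   "))) // 3: ["foo" "bar" "baz"]
}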

// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
// and assumes that f always returns the same value for a given c.
func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
	// A span is used to record a slice of s of the form s[start:end].
	// The start index is inclusive and the end index is exclusive.
	type span struct {
		start int
		end   int
	}
	spans := make([]span, 0, 32)

	// Find the field start and end indices.
	// Doing this in a separate pass (rather than slicing the string s
	// and collecting the result substrings right away) is significantly
	// more efficient, possibly due to cache effects.
	start := -1 // valid span start if >= 0
	for i := 0; i < len(s); {
		size := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeRune(s[i:])
		}
		if f(r) {
			if start >= 0 {
				spans = append(spans, span{start, i})
				start = -1
			}
		} else {
			if start < 0 {
				start = i
			}
		}
		i += size
	}

	// Last field might end at EOF.
	if start >= 0 {
		spans = append(spans, span{start, len(s)})
	}

	// Create subslices from recorded field indices.
	a := make([][]byte, len(spans))
	for i, span := range spans {
		a[i] = s[span.start:span.end:span.end]
	}

	return a
}
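
// sketchFieldsFunc is an editor-added illustrative sketch, not part of the
// upstream source: splitting on any non-letter, non-digit rune by passing a
// custom predicate to FieldsFunc.
func sketchFieldsFunc() int {
	f := func(c rune) bool {
		return !unicode.IsLetter(c) && !unicode.IsNumber(c)
	}
	return len(FieldsFunc([]byte("  foo1;bar2,baz3..."), f)) // 3
}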

// Join concatenates the elements of s to create a new byte slice. The separator
// sep is placed between elements in the resulting slice.
func Join(s [][]byte, sep []byte) []byte {
	if len(s) == 0 {
		return []byte{}
	}
	if len(s) == 1 {
		// Just return a copy.
		return append([]byte(nil), s[0]...)
	}
	n := len(sep) * (len(s) - 1)
	for _, v := range s {
		n += len(v)
	}

	b := make([]byte, n)
	bp := copy(b, s[0])
	for _, v := range s[1:] {
		bp += copy(b[bp:], sep)
		bp += copy(b[bp:], v)
	}
	return b
}

// HasPrefix tests whether the byte slice s begins with prefix.
func HasPrefix(s, prefix []byte) bool {
	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}

// HasSuffix tests whether the byte slice s ends with suffix.
func HasSuffix(s, suffix []byte) bool {
	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}
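
// sketchJoin is an editor-added illustrative sketch, not part of the upstream
// source: Join is the inverse of Split for a fixed separator, and HasPrefix
// and HasSuffix test the ends of the result.
func sketchJoin() (bool, bool) {
	b := Join([][]byte{[]byte("a"), []byte("b"), []byte("c")}, []byte(", "))
	// b == []byte("a, b, c")
	return HasPrefix(b, []byte("a,")), HasSuffix(b, []byte("c")) // true, true
}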

// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the byte slice with no replacement. The characters in s and the
// output are interpreted as UTF-8-encoded code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
	// In the worst case, the slice can grow when mapped, making
	// things unpleasant. But it's so rare we barge in assuming it's
	// fine. It could also shrink but that falls out naturally.
	maxbytes := len(s) // length of b
	nbytes := 0        // number of bytes encoded in b
	b := make([]byte, maxbytes)
	for i := 0; i < len(s); {
		wid := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, wid = utf8.DecodeRune(s[i:])
		}
		r = mapping(r)
		if r >= 0 {
			rl := utf8.RuneLen(r)
			if rl < 0 {
				rl = len(string(utf8.RuneError))
			}
			if nbytes+rl > maxbytes {
				// Grow the buffer.
				maxbytes = maxbytes*2 + utf8.UTFMax
				nb := make([]byte, maxbytes)
				copy(nb, b[0:nbytes])
				b = nb
			}
			nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
		}
		i += wid
	}
	return b[0:nbytes]
}

// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if
// the result of (len(b) * count) overflows.
func Repeat(b []byte, count int) []byte {
	if count == 0 {
		return []byte{}
	}
	// Since we cannot return an error on overflow,
	// we should panic if the repeat will generate
	// an overflow.
	// See Issue golang.org/issue/16237.
	if count < 0 {
		panic("bytes: negative Repeat count")
	} else if len(b)*count/count != len(b) {
		panic("bytes: Repeat count causes overflow")
	}

	nb := make([]byte, len(b)*count)
	bp := copy(nb, b)
	for bp < len(nb) {
		copy(nb[bp:], nb[:bp])
		bp *= 2
	}
	return nb
}
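
// sketchMapRepeat is an editor-added illustrative sketch, not part of the
// upstream source: Map applies a rune-to-rune function (here a toy ROT13 over
// ASCII letters), and Repeat concatenates count copies of its argument.
func sketchMapRepeat() ([]byte, []byte) {
	rot13 := func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z':
			return 'a' + (r-'a'+13)%26
		case r >= 'A' && r <= 'Z':
			return 'A' + (r-'A'+13)%26
		}
		return r
	}
	return Map(rot13, []byte("Gopher")), // "Tbcure"
		Repeat([]byte("na"), 2) // "nana"
}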

// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
// their upper case.
func ToUpper(s []byte) []byte {
	isASCII, hasLower := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasLower = hasLower || ('a' <= c && c <= 'z')
	}

	if isASCII { // optimize for ASCII-only byte slices.
		if !hasLower {
			// Just return a copy.
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToUpper, s)
}

// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
// their lower case.
func ToLower(s []byte) []byte {
	isASCII, hasUpper := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasUpper = hasUpper || ('A' <= c && c <= 'Z')
	}

	if isASCII { // optimize for ASCII-only byte slices.
		if !hasUpper {
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToLower, s)
}

// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }

// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// upper case, giving priority to the special casing rules.
func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToUpper, s)
}

// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// lower case, giving priority to the special casing rules.
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToLower, s)
}

// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToTitle, s)
}

// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
// representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
func ToValidUTF8(s, replacement []byte) []byte {
	b := make([]byte, 0, len(s)+len(replacement))
	invalid := false // previous byte was from an invalid UTF-8 sequence
	for i := 0; i < len(s); {
		c := s[i]
		if c < utf8.RuneSelf {
			i++
			invalid = false
			b = append(b, c)
			continue
		}
		_, wid := utf8.DecodeRune(s[i:])
		if wid == 1 {
			i++
			if !invalid {
				invalid = true
				b = append(b, replacement...)
			}
			continue
		}
		invalid = false
		b = append(b, s[i:i+wid]...)
		i += wid
	}
	return b
}
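
// sketchCase is an editor-added illustrative sketch, not part of the upstream
// source: ToUpper and ToLower take the ASCII fast path here, and ToValidUTF8
// collapses each run of invalid bytes into a single replacement.
func sketchCase() ([]byte, []byte, []byte) {
	return ToUpper([]byte("gopher")), // "GOPHER"
		ToLower([]byte("GOPHER")), // "gopher"
		ToValidUTF8([]byte("ok\xff\xfeok"), []byte("\uFFFD")) // "ok\uFFFDok"
}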

// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(r rune) bool {
	// ASCII alphanumerics and underscore are not separators
	if r <= 0x7F {
		switch {
		case '0' <= r && r <= '9':
			return false
		case 'a' <= r && r <= 'z':
			return false
		case 'A' <= r && r <= 'Z':
			return false
		case r == '_':
			return false
		}
		return true
	}
	// Letters and digits are not separators
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return false
	}
	// Otherwise, all we can do for now is treat spaces as separators.
	return unicode.IsSpace(r)
}

// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
// words mapped to their title case.
//
// Deprecated: The rule Title uses for word boundaries does not handle Unicode
// punctuation properly. Use golang.org/x/text/cases instead.
func Title(s []byte) []byte {
	// Use a closure here to remember state.
	// Hackish but effective. Depends on Map scanning in order and calling
	// the closure once per rune.
	prev := ' '
	return Map(
		func(r rune) rune {
			if isSeparator(prev) {
				prev = r
				return unicode.ToTitle(r)
			}
			prev = r
			return r
		},
		s)
}

// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
// all leading UTF-8-encoded code points c that satisfy f(c).
func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
	i := indexFunc(s, f, false)
	if i == -1 {
		return nil
	}
	return s[i:]
}

// TrimRightFunc returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
	i := lastIndexFunc(s, f, false)
	if i >= 0 && s[i] >= utf8.RuneSelf {
		_, wid := utf8.DecodeRune(s[i:])
		i += wid
	} else {
		i++
	}
	return s[0:i]
}

// TrimFunc returns a subslice of s by slicing off all leading and trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimFunc(s []byte, f func(r rune) bool) []byte {
	return TrimRightFunc(TrimLeftFunc(s, f), f)
}

// TrimPrefix returns s without the provided leading prefix string.
// If s doesn't start with prefix, s is returned unchanged.
func TrimPrefix(s, prefix []byte) []byte {
	if HasPrefix(s, prefix) {
		return s[len(prefix):]
	}
	return s
}

// TrimSuffix returns s without the provided trailing suffix string.
// If s doesn't end with suffix, s is returned unchanged.
func TrimSuffix(s, suffix []byte) []byte {
	if HasSuffix(s, suffix) {
		return s[:len(s)-len(suffix)]
	}
	return s
}
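
// sketchTrimPrefix is an editor-added illustrative sketch, not part of the
// upstream source: TrimPrefix removes at most one copy of the prefix, and
// TrimFunc trims from both ends using a predicate.
func sketchTrimPrefix() ([]byte, []byte) {
	return TrimPrefix([]byte("v1.18"), []byte("v")), // "1.18"
		TrimFunc([]byte("123abc456"), unicode.IsDigit) // "abc"
}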

// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s []byte, f func(r rune) bool) int {
	return indexFunc(s, f, true)
}

// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the last Unicode
// code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s []byte, f func(r rune) bool) int {
	return lastIndexFunc(s, f, true)
}

// indexFunc is the same as IndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
	start := 0
	for start < len(s) {
		wid := 1
		r := rune(s[start])
		if r >= utf8.RuneSelf {
			r, wid = utf8.DecodeRune(s[start:])
		}
		if f(r) == truth {
			return start
		}
		start += wid
	}
	return -1
}

// lastIndexFunc is the same as LastIndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
	for i := len(s); i > 0; {
		r, size := rune(s[i-1]), 1
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeLastRune(s[0:i])
		}
		i -= size
		if f(r) == truth {
			return i
		}
	}
	return -1
}

// asciiSet is a 32-byte value, where each bit represents the presence of a
// given ASCII character in the set. The 128-bits of the lower 16 bytes,
// starting with the least-significant bit of the lowest word to the
// most-significant bit of the highest word, map to the full range of all
// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
// This allocates a total of 32 bytes even though the upper half
// is unused to avoid bounds checks in asciiSet.contains.
type asciiSet [8]uint32

// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
	for i := 0; i < len(chars); i++ {
		c := chars[i]
		if c >= utf8.RuneSelf {
			return as, false
		}
		as[c/32] |= 1 << (c % 32)
	}
	return as, true
}

// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
	return (as[c/32] & (1 << (c % 32))) != 0
}

// containsRune is a simplified version of strings.ContainsRune
// to avoid importing the strings package.
// We avoid bytes.ContainsRune to avoid allocating a temporary copy of s.
func containsRune(s string, r rune) bool {
	for _, c := range s {
		if c == r {
			return true
		}
	}
	return false
}
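
// sketchIndexFunc is an editor-added illustrative sketch, not part of the
// upstream source: IndexFunc and LastIndexFunc locate the first and last rune
// satisfying a predicate, here unicode.IsUpper.
func sketchIndexFunc() (int, int) {
	s := []byte("goLanG")
	return IndexFunc(s, unicode.IsUpper), LastIndexFunc(s, unicode.IsUpper) // 2, 5
}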

// Trim returns a subslice of s by slicing off all leading and
// trailing UTF-8-encoded code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
	if len(s) == 0 {
		// This is what we've historically done.
		return nil
	}
	if cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimLeftASCII(trimRightASCII(s, &as), &as)
	}
	return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
}

// TrimLeft returns a subslice of s by slicing off all leading
// UTF-8-encoded code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
	if len(s) == 0 {
		// This is what we've historically done.
		return nil
	}
	if cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(s, cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimLeftASCII(s, &as)
	}
	return trimLeftUnicode(s, cutset)
}

func trimLeftByte(s []byte, c byte) []byte {
	for len(s) > 0 && s[0] == c {
		s = s[1:]
	}
	if len(s) == 0 {
		// This is what we've historically done.
		return nil
	}
	return s
}

func trimLeftASCII(s []byte, as *asciiSet) []byte {
	for len(s) > 0 {
		if !as.contains(s[0]) {
			break
		}
		s = s[1:]
	}
	if len(s) == 0 {
		// This is what we've historically done.
		return nil
	}
	return s
}

func trimLeftUnicode(s []byte, cutset string) []byte {
	for len(s) > 0 {
		r, n := rune(s[0]), 1
		if r >= utf8.RuneSelf {
			r, n = utf8.DecodeRune(s)
		}
		if !containsRune(cutset, r) {
			break
		}
		s = s[n:]
	}
	if len(s) == 0 {
		// This is what we've historically done.
		return nil
	}
	return s
}

// TrimRight returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
	if len(s) == 0 || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimRightByte(s, cutset[0])
	}
	if as, ok := makeASCIISet(cutset); ok {
		return trimRightASCII(s, &as)
	}
	return trimRightUnicode(s, cutset)
}

func trimRightByte(s []byte, c byte) []byte {
	for len(s) > 0 && s[len(s)-1] == c {
		s = s[:len(s)-1]
	}
	return s
}

func trimRightASCII(s []byte, as *asciiSet) []byte {
	for len(s) > 0 {
		if !as.contains(s[len(s)-1]) {
			break
		}
		s = s[:len(s)-1]
	}
	return s
}

func trimRightUnicode(s []byte, cutset string) []byte {
	for len(s) > 0 {
		r, n := rune(s[len(s)-1]), 1
		if r >= utf8.RuneSelf {
			r, n = utf8.DecodeLastRune(s)
		}
		if !containsRune(cutset, r) {
			break
		}
		s = s[:len(s)-n]
	}
	return s
}

// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {
	// Fast path for ASCII: look for the first ASCII non-space byte
	start := 0
	for ; start < len(s); start++ {
		c := s[start]
		if c >= utf8.RuneSelf {
			// If we run into a non-ASCII byte, fall back to the
			// slower unicode-aware method on the remaining bytes
			return TrimFunc(s[start:], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}

	// Now look for the first ASCII non-space byte from the end
	stop := len(s)
	for ; stop > start; stop-- {
		c := s[stop-1]
		if c >= utf8.RuneSelf {
			return TrimFunc(s[start:stop], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}

	// At this point s[start:stop] starts and ends with a non-space
	// ASCII byte, so we're done. Non-ASCII cases have already
	// been handled above.
	if start == stop {
		// Special case to preserve previous TrimLeftFunc behavior,
		// returning nil instead of empty slice if all spaces.
		return nil
	}
	return s[start:stop]
}
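
// sketchTrim is an editor-added illustrative sketch, not part of the upstream
// source: Trim strips any byte in cutset from both ends, while TrimSpace uses
// the Unicode definition of white space.
func sketchTrim() ([]byte, []byte) {
	return Trim([]byte("** Gopher **"), "* "), // "Gopher"
		TrimSpace([]byte(" \t\n hello \r\n")) // "hello"
}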

// Runes interprets s as a sequence of UTF-8-encoded code points.
// It returns a slice of runes (Unicode code points) equivalent to s.
func Runes(s []byte) []rune {
	t := make([]rune, utf8.RuneCount(s))
	i := 0
	for len(s) > 0 {
		r, l := utf8.DecodeRune(s)
		t[i] = r
		i++
		s = s[l:]
	}
	return t
}

// Replace returns a copy of the slice s with the first n
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
// If n < 0, there is no limit on the number of replacements.
func Replace(s, old, new []byte, n int) []byte {
	m := 0
	if n != 0 {
		// Compute number of replacements.
		m = Count(s, old)
	}
	if m == 0 {
		// Just return a copy.
		return append([]byte(nil), s...)
	}
	if n < 0 || m < n {
		n = m
	}

	// Apply replacements to buffer.
	t := make([]byte, len(s)+n*(len(new)-len(old)))
	w := 0
	start := 0
	for i := 0; i < n; i++ {
		j := start
		if len(old) == 0 {
			if i > 0 {
				_, wid := utf8.DecodeRune(s[start:])
				j += wid
			}
		} else {
			j += Index(s[start:], old)
		}
		w += copy(t[w:], s[start:j])
		w += copy(t[w:], new)
		start = j + len(old)
	}
	w += copy(t[w:], s[start:])
	return t[0:w]
}

// ReplaceAll returns a copy of the slice s with all
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
func ReplaceAll(s, old, new []byte) []byte {
	return Replace(s, old, new, -1)
}
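
// sketchReplace is an editor-added illustrative sketch, not part of the
// upstream source: Replace limits the number of substitutions, while
// ReplaceAll substitutes every match.
func sketchReplace() ([]byte, []byte) {
	s := []byte("oink oink oink")
	return Replace(s, []byte("k"), []byte("ky"), 2), // "oinky oinky oink"
		ReplaceAll(s, []byte("oink"), []byte("moo")) // "moo moo moo"
}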

// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under simple Unicode case-folding, which is a more general
// form of case-insensitivity.
func EqualFold(s, t []byte) bool {
	for len(s) != 0 && len(t) != 0 {
		// Extract first rune from each.
		var sr, tr rune
		if s[0] < utf8.RuneSelf {
			sr, s = rune(s[0]), s[1:]
		} else {
			r, size := utf8.DecodeRune(s)
			sr, s = r, s[size:]
		}
		if t[0] < utf8.RuneSelf {
			tr, t = rune(t[0]), t[1:]
		} else {
			r, size := utf8.DecodeRune(t)
			tr, t = r, t[size:]
		}

		// If they match, keep going; if not, return false.

		// Easy case.
		if tr == sr {
			continue
		}

		// Make sr < tr to simplify what follows.
		if tr < sr {
			tr, sr = sr, tr
		}
		// Fast check for ASCII.
		if tr < utf8.RuneSelf {
			// ASCII only, sr/tr must be upper/lower case
			if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
				continue
			}
			return false
		}

		// General case. SimpleFold(x) returns the next equivalent rune > x
		// or wraps around to smaller values.
		r := unicode.SimpleFold(sr)
		for r != sr && r < tr {
			r = unicode.SimpleFold(r)
		}
		if r == tr {
			continue
		}
		return false
	}

	// One string is empty. Are both?
	return len(s) == len(t)
}
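
// sketchEqualFold is an editor-added illustrative sketch, not part of the
// upstream source: simple case folding is more general than ASCII case
// mapping, so the Kelvin sign (U+212A) folds to 'k'.
func sketchEqualFold() (bool, bool) {
	return EqualFold([]byte("Go"), []byte("GO")), // true
		EqualFold([]byte("\u212A"), []byte("k")) // true
}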

// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
	n := len(sep)
	switch {
	case n == 0:
		return 0
	case n == 1:
		return IndexByte(s, sep[0])
	case n == len(s):
		if Equal(sep, s) {
			return 0
		}
		return -1
	case n > len(s):
		return -1
	case n <= bytealg.MaxLen:
		// Use brute force when s and sep both are small
		if len(s) <= bytealg.MaxBruteForce {
			return bytealg.Index(s, sep)
		}
		c0 := sep[0]
		c1 := sep[1]
		i := 0
		t := len(s) - n + 1
		fails := 0
		for i < t {
			if s[i] != c0 {
				// IndexByte is faster than bytealg.Index, so use it as long as
				// we're not getting lots of false positives.
				o := IndexByte(s[i+1:t], c0)
				if o < 0 {
					return -1
				}
				i += o + 1
			}
			if s[i+1] == c1 && Equal(s[i:i+n], sep) {
				return i
			}
			fails++
			i++
			// Switch to bytealg.Index when IndexByte produces too many false positives.
			if fails > bytealg.Cutover(i) {
				r := bytealg.Index(s[i:], sep)
				if r >= 0 {
					return r + i
				}
				return -1
			}
		}
		return -1
	}
	c0 := sep[0]
	c1 := sep[1]
	i := 0
	fails := 0
	t := len(s) - n + 1
	for i < t {
		if s[i] != c0 {
			o := IndexByte(s[i+1:t], c0)
			if o < 0 {
				break
			}
			i += o + 1
		}
		if s[i+1] == c1 && Equal(s[i:i+n], sep) {
			return i
		}
		i++
		fails++
		if fails >= 4+i>>4 && i < t {
			// Give up on IndexByte, it isn't skipping ahead
			// far enough to be better than Rabin-Karp.
			// Experiments (using IndexPeriodic) suggest
			// the cutover is about 16 byte skips.
			// TODO: if large prefixes of sep are matching
			// we should cutover at even larger average skips,
			// because Equal becomes that much more expensive.
			// This code does not take that effect into account.
			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
			if j < 0 {
				return -1
			}
			return i + j
		}
	}
	return -1
}
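
// sketchIndex is an editor-added illustrative sketch, not part of the upstream
// source: whichever internal strategy Index picks, the result is the byte
// offset of the first match, or -1 when there is none.
func sketchIndex() (int, int) {
	return Index([]byte("chicken"), []byte("ken")), // 4
		Index([]byte("chicken"), []byte("dmr")) // -1
}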

// Cut slices s around the first instance of sep,
// returning the text before and after sep.
// The found result reports whether sep appears in s.
// If sep does not appear in s, Cut returns s, nil, false.
//
// Cut returns slices of the original slice s, not copies.
func Cut(s, sep []byte) (before, after []byte, found bool) {
	if i := Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}
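
// sketchCut is an editor-added illustrative sketch, not part of the upstream
// source: Cut is a convenient alternative to SplitN(s, sep, 2) when only the
// first separator matters.
func sketchCut() (before, after []byte, found bool) {
	return Cut([]byte("key=value"), []byte("=")) // "key", "value", true
}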