go/src/bytes/bytes.go
Ilya Tocar 438818d9f1 bytes: use Index in Count
Similar to https://go-review.googlesource.com/28586,
but for package bytes instead of strings.
This provides simpler code and some performance gain.
Also update strings.Count to use the same code.

On AMD64 with a heavily optimized Index I see:

name             old time/op    new time/op     delta
Count/10-6         47.3ns ± 0%     36.8ns ± 0%    -22.35%  (p=0.000 n=10+10)
Count/32-6          286ns ± 0%       38ns ± 0%    -86.71%  (p=0.000 n=10+10)
Count/4K-6         50.1µs ± 0%      4.4µs ± 0%    -91.18%  (p=0.000 n=10+10)
Count/4M-6         48.1ms ± 1%      4.5ms ± 0%    -90.56%  (p=0.000 n=10+9)
Count/64M-6         784ms ± 0%       73ms ± 0%    -90.73%  (p=0.000 n=10+10)
CountEasy/10-6     28.4ns ± 0%     31.0ns ± 0%     +9.23%  (p=0.000 n=10+10)
CountEasy/32-6     30.6ns ± 0%     37.0ns ± 0%    +20.92%  (p=0.000 n=10+10)
CountEasy/4K-6      186ns ± 0%      198ns ± 0%     +6.45%  (p=0.000 n=9+10)
CountEasy/4M-6      233µs ± 2%      234µs ± 2%       ~     (p=0.912 n=10+10)
CountEasy/64M-6    6.70ms ± 0%     6.68ms ± 1%       ~     (p=0.762 n=8+10)

name             old speed      new speed       delta
Count/10-6        211MB/s ± 0%    272MB/s ± 0%    +28.77%  (p=0.000 n=10+9)
Count/32-6        112MB/s ± 0%    842MB/s ± 0%   +652.84%  (p=0.000 n=10+10)
Count/4K-6       81.8MB/s ± 0%  927.6MB/s ± 0%  +1033.63%  (p=0.000 n=10+9)
Count/4M-6       87.2MB/s ± 1%  924.0MB/s ± 0%   +959.25%  (p=0.000 n=10+9)
Count/64M-6      85.6MB/s ± 0%  922.9MB/s ± 0%   +978.31%  (p=0.000 n=10+10)
CountEasy/10-6    352MB/s ± 0%    322MB/s ± 0%     -8.41%  (p=0.000 n=10+10)
CountEasy/32-6   1.05GB/s ± 0%   0.87GB/s ± 0%    -17.35%  (p=0.000 n=9+10)
CountEasy/4K-6   22.0GB/s ± 0%   20.6GB/s ± 0%     -6.33%  (p=0.000 n=10+10)
CountEasy/4M-6   18.0GB/s ± 2%   18.0GB/s ± 2%       ~     (p=0.912 n=10+10)
CountEasy/64M-6  10.0GB/s ± 0%   10.0GB/s ± 1%       ~     (p=0.762 n=8+10)

On 386, without an asm version of Index:

name             old time/op    new time/op     delta
Count/10-6         57.0ns ± 0%     56.9ns ± 0%   -0.11%  (p=0.006 n=10+9)
Count/32-6          340ns ± 0%      274ns ± 0%  -19.48%  (p=0.000 n=10+9)
Count/4K-6         49.5µs ± 0%     37.1µs ± 0%  -24.96%  (p=0.000 n=10+10)
Count/4M-6         51.1ms ± 0%     38.2ms ± 0%  -25.21%  (p=0.000 n=10+10)
Count/64M-6         818ms ± 0%      613ms ± 0%  -25.07%  (p=0.000 n=8+10)
CountEasy/10-6     60.0ns ± 0%     70.4ns ± 0%  +17.34%  (p=0.000 n=10+10)
CountEasy/32-6     81.1ns ± 0%     94.0ns ± 0%  +15.97%  (p=0.000 n=9+10)
CountEasy/4K-6     4.37µs ± 0%     4.39µs ± 0%   +0.30%  (p=0.000 n=10+9)
CountEasy/4M-6     4.43ms ± 0%     4.43ms ± 0%     ~     (p=0.579 n=10+10)
CountEasy/64M-6    70.9ms ± 0%     70.9ms ± 0%     ~     (p=0.912 n=10+10)

name             old speed      new speed       delta
Count/10-6        176MB/s ± 0%    176MB/s ± 0%   +0.10%  (p=0.000 n=10+9)
Count/32-6       93.9MB/s ± 0%  116.5MB/s ± 0%  +24.06%  (p=0.000 n=10+9)
Count/4K-6       82.7MB/s ± 0%  110.3MB/s ± 0%  +33.26%  (p=0.000 n=10+10)
Count/4M-6       82.1MB/s ± 0%  109.7MB/s ± 0%  +33.70%  (p=0.000 n=10+10)
Count/64M-6      82.0MB/s ± 0%  109.5MB/s ± 0%  +33.46%  (p=0.000 n=8+10)
CountEasy/10-6    167MB/s ± 0%    142MB/s ± 0%  -14.75%  (p=0.000 n=9+10)
CountEasy/32-6    395MB/s ± 0%    340MB/s ± 0%  -13.77%  (p=0.000 n=10+10)
CountEasy/4K-6    936MB/s ± 0%    934MB/s ± 0%   -0.29%  (p=0.000 n=10+9)
CountEasy/4M-6    947MB/s ± 0%    946MB/s ± 0%     ~     (p=0.591 n=10+10)
CountEasy/64M-6   947MB/s ± 0%    947MB/s ± 0%     ~     (p=0.867 n=10+10)

Change-Id: Ia76b247372b6f5b5d23a9f10253a86536a5153b3
Reviewed-on: https://go-review.googlesource.com/36489
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2017-02-08 17:52:30 +00:00
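
The tables above are in benchstat's format (old/new columns with deltas and p-values), produced from repeated go test -bench runs. The sketch below is a minimal, hypothetical benchmark in that style, not the harness behind the numbers above; the names benchmarkCountSketch and BenchmarkCountSketch4K are made up, and the code would live in a *_test.go file (run with go test -bench=CountSketch -count=10 and compare runs with benchstat).

package bytes_test

import (
	"bytes"
	"testing"
)

// benchmarkCountSketch scans an n-byte buffer for a separator that never
// matches, so Count's cost is dominated by the underlying Index scan.
func benchmarkCountSketch(b *testing.B, n int) {
	buf := make([]byte, n)
	sep := []byte{1}
	b.SetBytes(int64(n))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bytes.Count(buf, sep)
	}
}

func BenchmarkCountSketch4K(b *testing.B) { benchmarkCountSketch(b, 4<<10) }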

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package bytes implements functions for the manipulation of byte slices.
// It is analogous to the facilities of the strings package.
package bytes
import (
"unicode"
"unicode/utf8"
)
// equalPortable reports whether a and b have the same length and contain
// the same bytes (a portable counterpart to the optimized Equal).
func equalPortable(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i, c := range a {
if c != b[i] {
return false
}
}
return true
}
// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
func explode(s []byte, n int) [][]byte {
if n <= 0 {
n = len(s)
}
a := make([][]byte, n)
var size int
na := 0
for len(s) > 0 {
if na+1 >= n {
a[na] = s
na++
break
}
_, size = utf8.DecodeRune(s)
a[na] = s[0:size]
s = s[size:]
na++
}
return a[0:na]
}
// Count counts the number of non-overlapping instances of sep in s.
// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s.
func Count(s, sep []byte) int {
n := 0
// special case
if len(sep) == 0 {
return utf8.RuneCount(s) + 1
}
for {
i := Index(s, sep)
if i == -1 {
return n
}
n++
s = s[i+len(sep):]
}
}
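// countUsageSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): Count scans left to right with Index, so
// matches never overlap, and an empty separator yields 1 + the rune count.
func countUsageSketch() (int, int) {
	overlapping := Count([]byte("aaaa"), []byte("aa")) // 2, not 3: the second match starts after the first
	perRune := Count([]byte("héllo"), []byte(""))      // 6: 1 + the 5 runes in "héllo"
	return overlapping, perRune
}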
// Contains reports whether subslice is within b.
func Contains(b, subslice []byte) bool {
return Index(b, subslice) != -1
}
// ContainsAny reports whether any of the UTF-8-encoded Unicode code points in chars are within b.
func ContainsAny(b []byte, chars string) bool {
return IndexAny(b, chars) >= 0
}
// ContainsRune reports whether the Unicode code point r is within b.
func ContainsRune(b []byte, r rune) bool {
return IndexRune(b, r) >= 0
}
// indexBytePortable returns the index of the first instance of c in s, or
// -1 if c is not present (a portable counterpart to the optimized IndexByte).
func indexBytePortable(s []byte, c byte) int {
for i, b := range s {
if b == c {
return i
}
}
return -1
}
// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
func LastIndex(s, sep []byte) int {
n := len(sep)
if n == 0 {
return len(s)
}
c := sep[0]
for i := len(s) - n; i >= 0; i-- {
if s[i] == c && (n == 1 || Equal(s[i:i+n], sep)) {
return i
}
}
return -1
}
// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s []byte, c byte) int {
for i := len(s) - 1; i >= 0; i-- {
if s[i] == c {
return i
}
}
return -1
}
// IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
switch {
case 0 <= r && r < utf8.RuneSelf:
return IndexByte(s, byte(r))
case r == utf8.RuneError:
for i := 0; i < len(s); {
r1, n := utf8.DecodeRune(s[i:])
if r1 == utf8.RuneError {
return i
}
i += n
}
return -1
case !utf8.ValidRune(r):
return -1
default:
var b [utf8.UTFMax]byte
n := utf8.EncodeRune(b[:], r)
return Index(s, b[:n])
}
}
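// indexRuneSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): ASCII runes take the IndexByte fast path,
// while multi-byte runes are encoded and located with Index, so the result
// is always a byte index.
func indexRuneSketch() (int, int) {
	ascii := IndexRune([]byte("chicken"), 'k') // 4: plain byte search
	multi := IndexRune([]byte("日本語"), '本')   // 3: '日' occupies bytes 0-2
	return ascii, multi
}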
// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of any of the Unicode
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
if len(chars) > 0 {
if len(s) > 8 {
if as, isASCII := makeASCIISet(chars); isASCII {
for i, c := range s {
if as.contains(c) {
return i
}
}
return -1
}
}
var width int
for i := 0; i < len(s); i += width {
r := rune(s[i])
if r < utf8.RuneSelf {
width = 1
} else {
r, width = utf8.DecodeRune(s[i:])
}
for _, ch := range chars {
if r == ch {
return i
}
}
}
}
return -1
}
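// indexAnySketch is an illustrative sketch, not part of the original file
// (the helper name is made up): IndexAny reports the first byte index at
// which any code point of chars appears, and -1 when none does or chars
// is empty.
func indexAnySketch() (int, int) {
	hit := IndexAny([]byte("chicken"), "aeiouy") // 2: 'i' is the first vowel
	miss := IndexAny([]byte("crwth"), "aeiou")   // -1: no code point in common
	return hit, miss
}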
// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
// points. It returns the byte index of the last occurrence in s of any of
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
if len(chars) > 0 {
if len(s) > 8 {
if as, isASCII := makeASCIISet(chars); isASCII {
for i := len(s) - 1; i >= 0; i-- {
if as.contains(s[i]) {
return i
}
}
return -1
}
}
for i := len(s); i > 0; {
r, size := utf8.DecodeLastRune(s[:i])
i -= size
for _, c := range chars {
if r == c {
return i
}
}
}
}
return -1
}
// Generic split: splits after each instance of sep,
// including sepSave bytes of sep in the subslices.
func genSplit(s, sep []byte, sepSave, n int) [][]byte {
if n == 0 {
return nil
}
if len(sep) == 0 {
return explode(s, n)
}
if n < 0 {
n = Count(s, sep) + 1
}
c := sep[0]
start := 0
a := make([][]byte, n)
na := 0
for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {
if s[i] == c && (len(sep) == 1 || Equal(s[i:i+len(sep)], sep)) {
a[na] = s[start : i+sepSave]
na++
start = i + len(sep)
i += len(sep) - 1
}
}
a[na] = s[start:]
return a[0 : na+1]
}
// SplitN slices s into subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
// n == 0: the result is nil (zero subslices)
// n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
return genSplit(s, sep, len(sep), n)
}
// Split slices s into all subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfter splits after each UTF-8 sequence.
// It is equivalent to SplitAfterN with a count of -1.
func SplitAfter(s, sep []byte) [][]byte {
return genSplit(s, sep, len(sep), -1)
}
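// splitSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): Split drops the separator, SplitAfter keeps
// it at the end of each subslice, and SplitN's count caps the number of
// subslices returned.
func splitSketch() ([][]byte, [][]byte, [][]byte) {
	plain := Split([]byte("a,b,c"), []byte(","))      // "a", "b", "c"
	after := SplitAfter([]byte("a,b,c"), []byte(",")) // "a,", "b,", "c"
	capped := SplitN([]byte("a,b,c"), []byte(","), 2) // "a", "b,c": the last element is the unsplit remainder
	return plain, after, capped
}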
// Fields splits the slice s around each instance of one or more consecutive white space
// characters, returning a slice of subslices of s or an empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
return FieldsFunc(s, unicode.IsSpace)
}
// FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
// FieldsFunc makes no guarantees about the order in which it calls f(c).
// If f does not return consistent results for a given c, FieldsFunc may crash.
func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
n := 0
inField := false
for i := 0; i < len(s); {
r, size := utf8.DecodeRune(s[i:])
wasInField := inField
inField = !f(r)
if inField && !wasInField {
n++
}
i += size
}
a := make([][]byte, n)
na := 0
fieldStart := -1
for i := 0; i <= len(s) && na < n; {
r, size := utf8.DecodeRune(s[i:])
if fieldStart < 0 && size > 0 && !f(r) {
fieldStart = i
i += size
continue
}
if fieldStart >= 0 && (size == 0 || f(r)) {
a[na] = s[fieldStart:i]
na++
fieldStart = -1
}
if size == 0 {
break
}
i += size
}
return a[0:na]
}
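// fieldsFuncSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): FieldsFunc splits around runs of code points
// for which f reports true, so consecutive separators do not produce empty
// fields.
func fieldsFuncSketch() [][]byte {
	isComma := func(r rune) bool { return r == ',' }
	return FieldsFunc([]byte("a,,b,c"), isComma) // "a", "b", "c"
}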
// Join concatenates the elements of s to create a new byte slice. The separator
// sep is placed between elements in the resulting slice.
func Join(s [][]byte, sep []byte) []byte {
if len(s) == 0 {
return []byte{}
}
if len(s) == 1 {
// Just return a copy.
return append([]byte(nil), s[0]...)
}
n := len(sep) * (len(s) - 1)
for _, v := range s {
n += len(v)
}
b := make([]byte, n)
bp := copy(b, s[0])
for _, v := range s[1:] {
bp += copy(b[bp:], sep)
bp += copy(b[bp:], v)
}
return b
}
// HasPrefix tests whether the byte slice s begins with prefix.
func HasPrefix(s, prefix []byte) bool {
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}
// HasSuffix tests whether the byte slice s ends with suffix.
func HasSuffix(s, suffix []byte) bool {
return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}
// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the byte slice with no replacement. The characters in s and the
// output are interpreted as UTF-8-encoded Unicode code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
// In the worst case, the slice can grow when mapped, making
// things unpleasant. But it's so rare we barge in assuming it's
// fine. It could also shrink but that falls out naturally.
maxbytes := len(s) // length of b
nbytes := 0 // number of bytes encoded in b
b := make([]byte, maxbytes)
for i := 0; i < len(s); {
wid := 1
r := rune(s[i])
if r >= utf8.RuneSelf {
r, wid = utf8.DecodeRune(s[i:])
}
r = mapping(r)
if r >= 0 {
rl := utf8.RuneLen(r)
if rl < 0 {
rl = len(string(utf8.RuneError))
}
if nbytes+rl > maxbytes {
// Grow the buffer.
maxbytes = maxbytes*2 + utf8.UTFMax
nb := make([]byte, maxbytes)
copy(nb, b[0:nbytes])
b = nb
}
nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
}
i += wid
}
return b[0:nbytes]
}
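// mapSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): returning a negative rune from the mapping
// drops that code point from the result, which is how functions such as
// ToUpper reuse Map.
func mapSketch() []byte {
	dropDigits := func(r rune) rune {
		if '0' <= r && r <= '9' {
			return -1 // dropped with no replacement
		}
		return r
	}
	return Map(dropDigits, []byte("g0ph3r")) // "gphr"
}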
// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if
// the result of (len(b) * count) overflows.
func Repeat(b []byte, count int) []byte {
// Since we cannot return an error on overflow,
// we should panic if the repeat will generate
// an overflow.
// See Issue golang.org/issue/16237.
if count < 0 {
panic("bytes: negative Repeat count")
} else if count > 0 && len(b)*count/count != len(b) {
panic("bytes: Repeat count causes overflow")
}
nb := make([]byte, len(b)*count)
bp := copy(nb, b)
for bp < len(nb) {
copy(nb[bp:], nb[:bp])
bp *= 2
}
return nb
}
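// repeatSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): the result is built by copying b once and
// then doubling the filled prefix, so the number of copy calls grows
// logarithmically with count.
func repeatSketch() []byte {
	return Repeat([]byte("na"), 4) // "nananana"
}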
// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to their upper case.
func ToUpper(s []byte) []byte { return Map(unicode.ToUpper, s) }
// ToLower returns a copy of the byte slice s with all Unicode letters mapped to their lower case.
func ToLower(s []byte) []byte { return Map(unicode.ToLower, s) }
// ToTitle returns a copy of the byte slice s with all Unicode letters mapped to their title case.
func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
// ToUpperSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// upper case, giving priority to the special casing rules.
func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToUpper(r) }, s)
}
// ToLowerSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// lower case, giving priority to the special casing rules.
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToLower(r) }, s)
}
// ToTitleSpecial returns a copy of the byte slice s with all Unicode letters mapped to their
// title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
return Map(func(r rune) rune { return c.ToTitle(r) }, s)
}
// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(r rune) bool {
// ASCII alphanumerics and underscore are not separators
if r <= 0x7F {
switch {
case '0' <= r && r <= '9':
return false
case 'a' <= r && r <= 'z':
return false
case 'A' <= r && r <= 'Z':
return false
case r == '_':
return false
}
return true
}
// Letters and digits are not separators
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return false
}
// Otherwise, all we can do for now is treat spaces as separators.
return unicode.IsSpace(r)
}
// Title returns a copy of s with all Unicode letters that begin words
// mapped to their title case.
//
// BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
func Title(s []byte) []byte {
// Use a closure here to remember state.
// Hackish but effective. Depends on Map scanning in order and calling
// the closure once per rune.
prev := ' '
return Map(
func(r rune) rune {
if isSeparator(prev) {
prev = r
return unicode.ToTitle(r)
}
prev = r
return r
},
s)
}
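// titleSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): Title upper-cases the first letter of each
// word as detected by isSeparator, which is why the BUG note about Unicode
// punctuation applies.
func titleSketch() []byte {
	return Title([]byte("her royal highness")) // "Her Royal Highness"
}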
// TrimLeftFunc returns a subslice of s by slicing off all leading UTF-8-encoded
// Unicode code points c that satisfy f(c).
func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
i := indexFunc(s, f, false)
if i == -1 {
return nil
}
return s[i:]
}
// TrimRightFunc returns a subslice of s by slicing off all trailing UTF-8
// encoded Unicode code points c that satisfy f(c).
func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
i := lastIndexFunc(s, f, false)
if i >= 0 && s[i] >= utf8.RuneSelf {
_, wid := utf8.DecodeRune(s[i:])
i += wid
} else {
i++
}
return s[0:i]
}
// TrimFunc returns a subslice of s by slicing off all leading and trailing
// UTF-8-encoded Unicode code points c that satisfy f(c).
func TrimFunc(s []byte, f func(r rune) bool) []byte {
return TrimRightFunc(TrimLeftFunc(s, f), f)
}
// TrimPrefix returns s without the provided leading prefix string.
// If s doesn't start with prefix, s is returned unchanged.
func TrimPrefix(s, prefix []byte) []byte {
if HasPrefix(s, prefix) {
return s[len(prefix):]
}
return s
}
// TrimSuffix returns s without the provided trailing suffix string.
// If s doesn't end with suffix, s is returned unchanged.
func TrimSuffix(s, suffix []byte) []byte {
if HasSuffix(s, suffix) {
return s[:len(s)-len(suffix)]
}
return s
}
// IndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index in s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s []byte, f func(r rune) bool) int {
return indexFunc(s, f, true)
}
// LastIndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index in s of the last Unicode
// code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s []byte, f func(r rune) bool) int {
return lastIndexFunc(s, f, true)
}
// indexFunc is the same as IndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
start := 0
for start < len(s) {
wid := 1
r := rune(s[start])
if r >= utf8.RuneSelf {
r, wid = utf8.DecodeRune(s[start:])
}
if f(r) == truth {
return start
}
start += wid
}
return -1
}
// lastIndexFunc is the same as LastIndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
for i := len(s); i > 0; {
r, size := rune(s[i-1]), 1
if r >= utf8.RuneSelf {
r, size = utf8.DecodeLastRune(s[0:i])
}
i -= size
if f(r) == truth {
return i
}
}
return -1
}
// asciiSet is a 32-byte value, where each bit represents the presence of a
// given ASCII character in the set. The 128 bits of the lower 16 bytes,
// from the least-significant bit of the lowest word to the
// most-significant bit of the highest word, map to the full range of all
// 128 ASCII characters. The 128 bits of the upper 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
type asciiSet [8]uint32
// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
for i := 0; i < len(chars); i++ {
c := chars[i]
if c >= utf8.RuneSelf {
return as, false
}
as[c>>5] |= 1 << uint(c&31)
}
return as, true
}
// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
return (as[c>>5] & (1 << uint(c&31))) != 0
}
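// asciiSetSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): character c lives in word c>>5 at bit c&31,
// e.g. 'A' (65) sets bit 1 of as[2], and any byte >= utf8.RuneSelf indexes
// the always-zero upper words, so it is never reported as present.
func asciiSetSketch() (bool, bool) {
	as, _ := makeASCIISet("ABC")
	return as.contains('A'), as.contains('a') // true, false
}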
func makeCutsetFunc(cutset string) func(r rune) bool {
if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
return func(r rune) bool {
return r == rune(cutset[0])
}
}
if as, isASCII := makeASCIISet(cutset); isASCII {
return func(r rune) bool {
return r < utf8.RuneSelf && as.contains(byte(r))
}
}
return func(r rune) bool {
for _, c := range cutset {
if c == r {
return true
}
}
return false
}
}
// Trim returns a subslice of s by slicing off all leading and
// trailing UTF-8-encoded Unicode code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
return TrimFunc(s, makeCutsetFunc(cutset))
}
// TrimLeft returns a subslice of s by slicing off all leading
// UTF-8-encoded Unicode code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
return TrimLeftFunc(s, makeCutsetFunc(cutset))
}
// TrimRight returns a subslice of s by slicing off all trailing
// UTF-8-encoded Unicode code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
return TrimRightFunc(s, makeCutsetFunc(cutset))
}
// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {
return TrimFunc(s, unicode.IsSpace)
}
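// trimSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): Trim treats cutset as a set of code points,
// not as a prefix or suffix string, which is what distinguishes it from
// TrimPrefix and TrimSuffix.
func trimSketch() ([]byte, []byte) {
	set := Trim([]byte("xxhelloyy"), "xy")              // "hello": any leading/trailing 'x' or 'y' is removed
	pre := TrimPrefix([]byte("xxhelloyy"), []byte("x")) // "xhelloyy": only one exact prefix is removed
	return set, pre
}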
// Runes returns a slice of runes (Unicode code points) equivalent to s.
func Runes(s []byte) []rune {
t := make([]rune, utf8.RuneCount(s))
i := 0
for len(s) > 0 {
r, l := utf8.DecodeRune(s)
t[i] = r
i++
s = s[l:]
}
return t
}
// Replace returns a copy of the slice s with the first n
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
// If n < 0, there is no limit on the number of replacements.
func Replace(s, old, new []byte, n int) []byte {
m := 0
if n != 0 {
// Compute number of replacements.
m = Count(s, old)
}
if m == 0 {
// Just return a copy.
return append([]byte(nil), s...)
}
if n < 0 || m < n {
n = m
}
// Apply replacements to buffer.
t := make([]byte, len(s)+n*(len(new)-len(old)))
w := 0
start := 0
for i := 0; i < n; i++ {
j := start
if len(old) == 0 {
if i > 0 {
_, wid := utf8.DecodeRune(s[start:])
j += wid
}
} else {
j += Index(s[start:], old)
}
w += copy(t[w:], s[start:j])
w += copy(t[w:], new)
start = j + len(old)
}
w += copy(t[w:], s[start:])
return t[0:w]
}
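// replaceSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): n limits the number of replacements, and an
// empty old matches before the first rune and after every rune of s.
func replaceSketch() ([]byte, []byte) {
	limited := Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2) // "oinky oinky oink"
	empty := Replace([]byte("ab"), []byte(""), []byte("-"), -1)                // "-a-b-": 3 insertions for a 2-rune slice
	return limited, empty
}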
// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under Unicode case-folding.
func EqualFold(s, t []byte) bool {
for len(s) != 0 && len(t) != 0 {
// Extract first rune from each.
var sr, tr rune
if s[0] < utf8.RuneSelf {
sr, s = rune(s[0]), s[1:]
} else {
r, size := utf8.DecodeRune(s)
sr, s = r, s[size:]
}
if t[0] < utf8.RuneSelf {
tr, t = rune(t[0]), t[1:]
} else {
r, size := utf8.DecodeRune(t)
tr, t = r, t[size:]
}
// If they match, keep going; if not, return false.
// Easy case.
if tr == sr {
continue
}
// Make sr < tr to simplify what follows.
if tr < sr {
tr, sr = sr, tr
}
// Fast check for ASCII.
if tr < utf8.RuneSelf && 'A' <= sr && sr <= 'Z' {
// ASCII, and sr is upper case. tr must be lower case.
if tr == sr+'a'-'A' {
continue
}
return false
}
// General case. SimpleFold(x) returns the next equivalent rune > x
// or wraps around to smaller values.
r := unicode.SimpleFold(sr)
for r != sr && r < tr {
r = unicode.SimpleFold(r)
}
if r == tr {
continue
}
return false
}
// One string is empty. Are both?
return len(s) == len(t)
}
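// equalFoldSketch is an illustrative sketch, not part of the original file
// (the helper name is made up): folding is simple Unicode case folding, so
// it also relates characters such as 'K' and the Kelvin sign U+212A, not
// just ASCII case pairs.
func equalFoldSketch() (bool, bool) {
	ascii := EqualFold([]byte("Go"), []byte("GO"))     // true: ASCII fast path
	kelvin := EqualFold([]byte("K"), []byte("\u212a")) // true: SimpleFold links 'K', 'k', and U+212A
	return ascii, kelvin
}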