TUN-9016: update go to 1.24

## Summary

Update several moving parts of the cloudflared build system:

* use goboring 1.24.2 in cfsetup
* update linter and fix lint issues (see the sketch below this list)
* update packages, namely **quic-go and net**
* install script for macOS
* update Dockerfiles to use go 1.24.1
* remove usage of cloudflare-go
* pin golang linter
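
One mechanical change the newer toolchain and linter push across the codebase, and which repeats throughout the vendored protobuf diff below, is replacing `interface{}` with its alias `any`. A minimal, hypothetical sketch of the pattern (not taken from cloudflared itself):

```go
package main

import "fmt"

// before: func describe(v interface{}) string
// after:  the linter prefers the alias `any`, identical at the type level.
func describe(v any) string {
	switch v := v.(type) {
	case int:
		return fmt.Sprintf("int: %d", v)
	case string:
		return fmt.Sprintf("string: %q", v)
	default:
		return fmt.Sprintf("other: %v", v)
	}
}

func main() {
	fmt.Println(describe(42), describe("go 1.24"))
}
```

Both spellings are the same type; the alias has existed since Go 1.18, so this is purely a readability and lint cleanup.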

Closes TUN-9016
Author: Luis Neto
Date: 2025-06-06 09:05:49 +00:00
Parent: e144eac2af
Commit: 96ce66bd30
585 changed files with 23572 additions and 21356 deletions

View File

@@ -22,13 +22,13 @@ type Export struct{}
// NewError formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func (Export) NewError(f string, x ...interface{}) error {
func (Export) NewError(f string, x ...any) error {
return errors.New(f, x...)
}
// enum is any enum type generated by protoc-gen-go
// and must be a named int32 type.
type enum = interface{}
type enum = any
// EnumOf returns the protoreflect.Enum interface over e.
// It returns nil if e is nil.
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
// message is any message type generated by protoc-gen-go
// and must be a pointer to a named struct type.
type message = interface{}
type message = any
// legacyMessageWrapper wraps a v2 message as a v1 message.
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }

View File

@@ -0,0 +1,128 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"strconv"
"sync/atomic"
"unsafe"
"google.golang.org/protobuf/reflect/protoreflect"
)
func (Export) UnmarshalField(msg any, fieldNum int32) {
UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
}
// Present checks the presence set for a certain field number (zero
// based, ordered by appearance in original proto file). part is
// a pointer to the correct element in the bitmask array, num is the
// field number unaltered. Example (field number 70 -> part =
// &m.XXX_presence[2], num = 70)
func (Export) Present(part *uint32, num uint32) bool {
// This hook will read an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookPresent(part, num)
return atomic.LoadUint32(part)&(1<<(num%32)) > 0
}
// SetPresent adds a field to the presence set. part is a pointer to
// the relevant element in the array and num is the field number
// unaltered. size is the number of fields in the protocol
// buffer.
func (Export) SetPresent(part *uint32, num uint32, size uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookSetPresent(part, num, presenceSize(size))
for {
old := atomic.LoadUint32(part)
if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
return
}
}
}
// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
// It is meant for use by builder methods, where the message is known not
// to be accessible yet by other goroutines.
func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookSetPresent(part, num, presenceSize(size))
*part |= 1 << (num % 32)
}
// ClearPresence removes a field from the presence set. part is a
// pointer to the relevant element in the presence array and num is
// the field number unaltered.
func (Export) ClearPresent(part *uint32, num uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookClearPresent(part, num)
for {
old := atomic.LoadUint32(part)
if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
return
}
}
}
// interfaceToPointer takes a pointer to an empty interface whose value is a
// pointer type, and converts it into a "pointer" that points to the same
// target
func interfaceToPointer(i *any) pointer {
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
func (p pointer) atomicGetPointer() pointer {
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
func (p pointer) atomicSetPointer(q pointer) {
atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
}
// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
// pointer) and returns true if the pointed-to pointer is nil (using an
// atomic load). This function is inlineable and, on x86, just becomes a
// simple load and compare.
func (Export) AtomicCheckPointerIsNil(ptr any) bool {
return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
}
// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
// second is a pointer) and atomically sets the second pointer into location
// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline
// (even on x86), so this does not become a simple store on x86.
func (Export) AtomicSetPointer(dstPtr, valPtr any) {
interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
}
// AtomicLoadPointer loads the pointer at the location pointed at by src,
// and stores that pointer value into the location pointed at by dst.
func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
}
// AtomicInitializePointer makes ptr and dst point to the same value.
//
// If *ptr is a nil pointer, it sets *ptr = *dst.
//
// If *ptr is a non-nil pointer, it sets *dst = *ptr.
func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
}
}
// MessageFieldStringOf returns the field formatted as a string,
// either as the field name if resolvable otherwise as a decimal string.
func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
fd := md.Fields().ByNumber(n)
if fd != nil {
return string(fd.Name())
}
return strconv.Itoa(int(n))
}
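
The presence helpers above pack one bit per field into a []uint32 bitmask; callers pass a pointer to the word that holds the field's bit together with the unaltered field index. A small standalone sketch of that indexing, using hypothetical names and omitting the atomics and race-detector hooks:

```go
package main

import "fmt"

// presenceWordAndBit mirrors the indexing convention used above:
// field index num lives in word num/32, at bit num%32.
func presenceWordAndBit(num uint32) (word int, bit uint32) {
	return int(num / 32), 1 << (num % 32)
}

func main() {
	presence := make([]uint32, 4) // room for 128 fields

	// Set field 70: word 2, bit 70%32 == 6.
	w, b := presenceWordAndBit(70)
	presence[w] |= b

	// Check it the same way Present does, minus the atomic load.
	fmt.Println(presence[w]&b != 0) // true
	fmt.Println(w, b)               // 2 64
}
```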

View File

@@ -0,0 +1,34 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !race
package impl
// There is no additional data as we're not running under race detector.
type RaceDetectHookData struct{}
// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
func (presence) raceDetectHookPresent(num uint32) {}
func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
func (presence) raceDetectHookClearPresent(num uint32) {}
func (presence) raceDetectHookAllocAndCopy(src presence) {}
// raceDetectHookPresent is called by the generated file interface
// (*proto.internalFuncs) Present to optionally read an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookPresent(field *uint32, num uint32) {}
// raceDetectHookSetPresent is called by the generated file interface
// (*proto.internalFuncs) SetPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
// raceDetectHookClearPresent is called by the generated file interface
// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookClearPresent(field *uint32, num uint32) {}

View File

@@ -0,0 +1,126 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build race
package impl
// When running under race detector, we add a presence map of bytes, that we can access
// in the hook functions so that we trigger the race detection whenever we have concurrent
// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
type RaceDetectHookData struct {
shadowPresence *[]byte
}
// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
// using non-atomic operations.
func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
sp := make([]byte, size)
atomicStoreShadowPresence(&data.shadowPresence, &sp)
}
func (p presence) raceDetectHookPresent(num uint32) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
_ = (*sp)[num]
}
}
func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp == nil {
data.raceDetectHookAlloc(size)
sp = atomicLoadShadowPresence(&data.shadowPresence)
}
(*sp)[num] = 1
}
func (p presence) raceDetectHookClearPresent(num uint32) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
(*sp)[num] = 0
}
}
// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies
// shadowPresence bytes from src to lazy.
func (p presence) raceDetectHookAllocAndCopy(q presence) {
sData := q.toRaceDetectData()
dData := p.toRaceDetectData()
if sData == nil {
return
}
srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
if srcSp == nil {
atomicStoreShadowPresence(&dData.shadowPresence, nil)
return
}
n := len(*srcSp)
dSlice := make([]byte, n)
atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
for i := 0; i < n; i++ {
dSlice[i] = (*srcSp)[i]
}
}
// raceDetectHookPresent is called by the generated file interface
// (*proto.internalFuncs) Present to optionally read an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookPresent(field *uint32, num uint32) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
_ = (*sp)[num]
}
}
// raceDetectHookSetPresent is called by the generated file interface
// (*proto.internalFuncs) SetPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp == nil {
data.raceDetectHookAlloc(size)
sp = atomicLoadShadowPresence(&data.shadowPresence)
}
(*sp)[num] = 1
}
// raceDetectHookClearPresent is called by the generated file interface
// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookClearPresent(field *uint32, num uint32) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
(*sp)[num] = 0
}
}

View File

@@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
}
return nil
}
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
}
if mi.extensionOffset.IsValid() {
e := p.Apply(mi.extensionOffset).Extensions()
if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
if !f.isRequired && f.funcs.isInit == nil {
continue
}
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
if f.isRequired {
return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
}
continue
}
if f.funcs.isInit != nil {
f.mi.init()
if f.mi.needsInitCheck {
if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
if !lazy.AllowedPartial() {
// Nothing to see here, it was checked on unmarshal
continue
}
mi.lazyUnmarshal(p, f.num)
}
if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
return err
}
}
}
continue
}
fptr := p.Apply(f.offset)
if f.isPointer && fptr.Elem().IsNil() {
if f.isRequired {
@@ -68,7 +101,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
}
for _, x := range *ext {
ei := getExtensionFieldInfo(x.Type())
if ei.funcs.isInit == nil {
if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
continue
}
v := x.Value()

View File

@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -99,6 +98,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
return false
}
// isUnexpandedLazy returns true if the ExtensionField is lazy and not
// yet expanded, which means it's present and already checked for
// initialized required fields.
func (f *ExtensionField) isUnexpandedLazy() bool {
return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
}
// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
//
// The returned buffer has to be kept over whatever operation we're planning,
// as re-retrieving it will fail after the message is lazily decoded.
func (f *ExtensionField) lazyBuffer() []byte {
// This function might be in the critical path, so check the atomic without
// taking a lock first, then only take the lock if needed.
if !f.isUnexpandedLazy() {
return nil
}
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
return f.lazy.b
}
func (f *ExtensionField) lazyInit() {
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
@@ -136,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
f.lazy.value = f.lazy.fn()
panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -152,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
// This must not be called concurrently.
func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
f.typ = t
f.lazy = &lazyExtensionValue{fn: fn}
}
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {

View File

@@ -5,15 +5,12 @@
package impl
import (
"fmt"
"reflect"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -65,6 +62,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
if cf.funcs.isInit == nil {
out.initialized = true
}
vi.Set(vw)
return out, nil
}
@@ -118,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
}
}
func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
var once sync.Once
var messageType protoreflect.MessageType
lazyInit := func() {
once.Do(func() {
messageName := fd.Message().FullName()
messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
})
}
return pointerCoderFuncs{
size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
m, ok := p.WeakFields().get(f.num)
if !ok {
return 0
}
lazyInit()
if messageType == nil {
panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
}
return sizeMessage(m, f.tagsize, opts)
},
marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
m, ok := p.WeakFields().get(f.num)
if !ok {
return b, nil
}
lazyInit()
if messageType == nil {
panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
}
return appendMessage(b, m, f.wiretag, opts)
},
unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
fs := p.WeakFields()
m, ok := fs.get(f.num)
if !ok {
lazyInit()
if messageType == nil {
return unmarshalOutput{}, errUnknown
}
m = messageType.New().Interface()
fs.set(f.num, m)
}
return consumeMessage(b, m, wtyp, opts)
},
isInit: func(p pointer, f *coderFieldInfo) error {
m, ok := p.WeakFields().get(f.num)
if !ok {
return nil
}
return proto.CheckInitialized(m)
},
merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
sm, ok := src.WeakFields().get(f.num)
if !ok {
return
}
dm, ok := dst.WeakFields().get(f.num)
if !ok {
lazyInit()
if messageType == nil {
panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
}
dm = messageType.New().Interface()
dst.WeakFields().set(f.num, dm)
}
opts.Merge(dm, sm)
},
}
}
func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{

View File

@@ -0,0 +1,264 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/reflect/protoreflect"
)
func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
mi := getMessageInfo(ft)
if mi == nil {
panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
}
switch fd.Kind() {
case protoreflect.MessageKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueMessage,
marshal: appendOpaqueMessage,
unmarshal: consumeOpaqueMessage,
isInit: isInitOpaqueMessage,
merge: mergeOpaqueMessage,
}
case protoreflect.GroupKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueGroup,
marshal: appendOpaqueGroup,
unmarshal: consumeOpaqueGroup,
isInit: isInitOpaqueMessage,
merge: mergeOpaqueMessage,
}
}
panic("unexpected field kind")
}
func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
}
func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
mp := p.AtomicGetPointer()
calculatedSize := f.mi.sizePointer(mp, opts)
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(calculatedSize))
before := len(b)
b, err := f.mi.marshalAppendPointer(b, mp, opts)
if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
}
return b, err
}
func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
mp := p.AtomicGetPointer()
if mp.IsNil() {
mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
if err != nil {
return out, err
}
out.n = n
out.initialized = o.initialized
return out, nil
}
func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
mp := p.AtomicGetPointer()
if mp.IsNil() {
return nil
}
return f.mi.checkInitializedPointer(mp)
}
func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
dstmp := dst.AtomicGetPointer()
if dstmp.IsNil() {
dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
}
func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
}
func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, f.wiretag) // start group
b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
b = protowire.AppendVarint(b, f.wiretag+1) // end group
return b, err
}
func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.StartGroupType {
return out, errUnknown
}
mp := p.AtomicGetPointer()
if mp.IsNil() {
mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
return o, e
}
func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
}
mt := ft.Elem().Elem() // *[]*T -> *T
mi := getMessageInfo(mt)
if mi == nil {
panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
}
switch fd.Kind() {
case protoreflect.MessageKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueMessageSlice,
marshal: appendOpaqueMessageSlice,
unmarshal: consumeOpaqueMessageSlice,
isInit: isInitOpaqueMessageSlice,
merge: mergeOpaqueMessageSlice,
}
case protoreflect.GroupKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueGroupSlice,
marshal: appendOpaqueGroupSlice,
unmarshal: consumeOpaqueGroupSlice,
isInit: isInitOpaqueMessageSlice,
merge: mergeOpaqueMessageSlice,
}
}
panic("unexpected field kind")
}
func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.AtomicGetPointer().PointerSlice()
n := 0
for _, v := range s {
n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
}
return n
}
func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.AtomicGetPointer().PointerSlice()
var err error
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
siz := f.mi.sizePointer(v, opts)
b = protowire.AppendVarint(b, uint64(siz))
before := len(b)
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
if measuredSize := len(b) - before; siz != measuredSize {
return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
}
}
return b, nil
}
func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
if err != nil {
return out, err
}
sp := p.AtomicGetPointer()
if sp.IsNil() {
sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
sp.AppendPointerSlice(mp)
out.n = n
out.initialized = o.initialized
return out, nil
}
func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
sp := p.AtomicGetPointer()
if sp.IsNil() {
return nil
}
s := sp.PointerSlice()
for _, v := range s {
if err := f.mi.checkInitializedPointer(v); err != nil {
return err
}
}
return nil
}
func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
ds := dst.AtomicGetPointer()
if ds.IsNil() {
ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
for _, sp := range src.AtomicGetPointer().PointerSlice() {
dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
f.mi.mergePointer(dm, sp, opts)
ds.AppendPointerSlice(dm)
}
}
func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.AtomicGetPointer().PointerSlice()
n := 0
for _, v := range s {
n += 2*f.tagsize + f.mi.sizePointer(v, opts)
}
return n
}
func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.AtomicGetPointer().PointerSlice()
var err error
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag) // start group
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
b = protowire.AppendVarint(b, f.wiretag+1) // end group
}
return b, nil
}
func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.StartGroupType {
return out, errUnknown
}
mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
if err != nil {
return out, err
}
sp := p.AtomicGetPointer()
if sp.IsNil() {
sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
sp.AppendPointerSlice(mp)
return out, err
}

View File

@@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
return 0
}
n := 0
iter := mapRange(mapv)
iter := mapv.MapRange()
for iter.Next() {
key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
@@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o
if opts.Deterministic() {
return appendMapDeterministic(b, mapv, mapi, f, opts)
}
iter := mapRange(mapv)
iter := mapv.MapRange()
for iter.Next() {
var err error
b = protowire.AppendVarint(b, f.wiretag)
@@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
if !mi.needsInitCheck {
return nil
}
iter := mapRange(mapv)
iter := mapv.MapRange()
for iter.Next() {
val := pointerOfValue(iter.Value())
if err := mi.checkInitializedPointer(val); err != nil {
@@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
}
}
} else {
iter := mapRange(mapv)
iter := mapv.MapRange()
for iter.Next() {
val := mapi.conv.valConv.PBValueOf(iter.Value())
if err := mapi.valFuncs.isInit(val); err != nil {
@@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
iter := mapRange(srcm)
iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), iter.Value())
}
@@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
iter := mapRange(srcm)
iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
}
@@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
iter := mapRange(srcm)
iter := srcm.MapRange()
for iter.Next() {
val := reflect.New(f.ft.Elem().Elem())
if f.mi != nil {

View File

@@ -1,38 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.12
// +build !go1.12
package impl
import "reflect"
type mapIter struct {
v reflect.Value
keys []reflect.Value
}
// mapRange provides a less-efficient equivalent to
// the Go 1.12 reflect.Value.MapRange method.
func mapRange(v reflect.Value) *mapIter {
return &mapIter{v: v}
}
func (i *mapIter) Next() bool {
if i.keys == nil {
i.keys = i.v.MapKeys()
} else {
i.keys = i.keys[1:]
}
return len(i.keys) > 0
}
func (i *mapIter) Key() reflect.Value {
return i.keys[0]
}
func (i *mapIter) Value() reflect.Value {
return i.v.MapIndex(i.keys[0])
}

View File

@@ -1,12 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.12
// +build go1.12
package impl
import "reflect"
func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }

View File

@@ -32,6 +32,10 @@ type coderMessageInfo struct {
needsInitCheck bool
isMessageSet bool
numRequiredFields uint8
lazyOffset offset
presenceOffset offset
presenceSize presenceSize
}
type coderFieldInfo struct {
@@ -45,12 +49,19 @@ type coderFieldInfo struct {
tagsize int // size of the varint-encoded tag
isPointer bool // true if IsNil may be called on the struct field
isRequired bool // true if field is required
isLazy bool
presenceIndex uint32
}
const noPresence = 0xffffffff
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
mi.sizecacheOffset = invalidOffset
mi.unknownOffset = invalidOffset
mi.extensionOffset = invalidOffset
mi.lazyOffset = invalidOffset
mi.presenceOffset = si.presenceOffset
if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
mi.sizecacheOffset = si.sizecacheOffset
@@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
},
}
case isOneof:
fieldOffset = offsetOf(fs, mi.Exporter)
case fd.IsWeak():
fieldOffset = si.weakOffset
funcs = makeWeakMessageFieldCoder(fd)
fieldOffset = offsetOf(fs)
default:
fieldOffset = offsetOf(fs, mi.Exporter)
fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &preallocFields[i]
@@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
validation: newFieldValidationInfo(mi, si, fd, ft),
isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
isRequired: fd.Cardinality() == protoreflect.Required,
presenceIndex: noPresence,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
@@ -189,6 +199,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
if mi.methods.Equal == nil {
mi.methods.Equal = equal
}
}
// getUnknownBytes returns a *[]byte for the unknown fields.

View File

@@ -0,0 +1,153 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"reflect"
"sort"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
)
func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
mi.sizecacheOffset = si.sizecacheOffset
mi.unknownOffset = si.unknownOffset
mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
mi.extensionOffset = si.extensionOffset
mi.lazyOffset = si.lazyOffset
mi.presenceOffset = si.presenceOffset
mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
fields := mi.Desc.Fields()
for i := 0; i < fields.Len(); i++ {
fd := fields.Get(i)
fs := si.fieldsByNumber[fd.Number()]
if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
fs = si.oneofsByName[fd.ContainingOneof().Name()]
}
ft := fs.Type
var wiretag uint64
if !fd.IsPacked() {
wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
} else {
wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
}
var fieldOffset offset
var funcs pointerCoderFuncs
var childMessage *MessageInfo
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
fieldOffset = offsetOf(fs)
case fd.Message() != nil && !fd.IsMap():
fieldOffset = offsetOf(fs)
if fd.IsList() {
childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
} else {
childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
}
default:
fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &coderFieldInfo{
num: fd.Number(),
offset: fieldOffset,
wiretag: wiretag,
ft: ft,
tagsize: protowire.SizeVarint(wiretag),
funcs: funcs,
mi: childMessage,
validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
isPointer: (fd.Cardinality() == protoreflect.Repeated ||
fd.Kind() == protoreflect.MessageKind ||
fd.Kind() == protoreflect.GroupKind),
isRequired: fd.Cardinality() == protoreflect.Required,
presenceIndex: noPresence,
}
// TODO: Use presence for all fields.
//
// In some cases, such as maps, presence means only "might be set" rather
// than "is definitely set", but every field should have a presence bit to
// permit us to skip over definitely-unset fields at marshal time.
var hasPresence bool
hasPresence, cf.isLazy = usePresenceForField(si, fd)
if hasPresence {
cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
}
for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
if od := oneofs.Get(i); !od.IsSynthetic() {
mi.initOneofFieldCoders(od, si.structInfo)
}
}
if messageset.IsMessageSet(mi.Desc) {
if !mi.extensionOffset.IsValid() {
panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
}
if !mi.unknownOffset.IsValid() {
panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
}
mi.isMessageSet = true
}
sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
})
var maxDense protoreflect.FieldNumber
for _, cf := range mi.orderedCoderFields {
if cf.num >= 16 && cf.num >= 2*maxDense {
break
}
maxDense = cf.num
}
mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
for _, cf := range mi.orderedCoderFields {
if int(cf.num) > len(mi.denseCoderFields) {
break
}
mi.denseCoderFields[cf.num] = cf
}
// To preserve compatibility with historic wire output, marshal oneofs last.
if mi.Desc.Oneofs().Len() > 0 {
sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
fi := fields.ByNumber(mi.orderedCoderFields[i].num)
fj := fields.ByNumber(mi.orderedCoderFields[j].num)
return order.LegacyFieldOrder(fi, fj)
})
}
mi.needsInitCheck = needsInitCheck(mi.Desc)
if mi.methods.Marshal == nil && mi.methods.Size == nil {
mi.methods.Flags |= piface.SupportMarshalDeterministic
mi.methods.Marshal = mi.marshal
mi.methods.Size = mi.size
}
if mi.methods.Unmarshal == nil {
mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
mi.methods.Unmarshal = mi.unmarshal
}
if mi.methods.CheckInitialized == nil {
mi.methods.CheckInitialized = mi.checkInitialized
}
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
if mi.methods.Equal == nil {
mi.methods.Equal = equal
}
}

View File

@@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
}
num, _ := protowire.DecodeTag(xi.wiretag)
size += messageset.SizeField(num)
if fullyLazyExtensions(opts) {
// Don't expand the extension, instead use the buffer to calculate size
if lb := x.lazyBuffer(); lb != nil {
// We got hold of the buffer, so it's still lazy.
// Don't count the tag size in the extension buffer, it's already added.
size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
continue
}
}
size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
}
@@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
xi := getExtensionFieldInfo(x.Type())
num, _ := protowire.DecodeTag(xi.wiretag)
b = messageset.AppendFieldStart(b, num)
if fullyLazyExtensions(opts) {
// Don't expand the extension if it's still in wire format, instead use the buffer content.
if lb := x.lazyBuffer(); lb != nil {
// The tag inside the lazy buffer is a different tag (the extension
// number), but what we need here is the tag for FieldMessage:
b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
b = append(b, lb[xi.tagsize:]...)
b = messageset.AppendFieldEnd(b)
return b, nil
}
}
b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
if err != nil {
return b, err

View File

@@ -1,210 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego || appengine
// +build purego appengine
package impl
import (
"reflect"
"google.golang.org/protobuf/encoding/protowire"
)
func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
v := p.v.Elem().Int()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := p.v.Elem().Int()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
return b, nil
}
func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
p.v.Elem().SetInt(int64(v))
out.n = n
return out, nil
}
func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(src.v.Elem())
}
var coderEnum = pointerCoderFuncs{
size: sizeEnum,
marshal: appendEnum,
unmarshal: consumeEnum,
merge: mergeEnum,
}
func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
if p.v.Elem().Int() == 0 {
return 0
}
return sizeEnum(p, f, opts)
}
func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
if p.v.Elem().Int() == 0 {
return b, nil
}
return appendEnum(b, p, f, opts)
}
func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if src.v.Elem().Int() != 0 {
dst.v.Elem().Set(src.v.Elem())
}
}
var coderEnumNoZero = pointerCoderFuncs{
size: sizeEnumNoZero,
marshal: appendEnumNoZero,
unmarshal: consumeEnum,
merge: mergeEnumNoZero,
}
func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return sizeEnum(pointer{p.v.Elem()}, f, opts)
}
func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
return appendEnum(b, pointer{p.v.Elem()}, f, opts)
}
func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
if p.v.Elem().IsNil() {
p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
}
return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
}
func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if !src.v.Elem().IsNil() {
v := reflect.New(dst.v.Type().Elem().Elem())
v.Elem().Set(src.v.Elem().Elem())
dst.v.Elem().Set(v)
}
}
var coderEnumPtr = pointerCoderFuncs{
size: sizeEnumPtr,
marshal: appendEnumPtr,
unmarshal: consumeEnumPtr,
merge: mergeEnumPtr,
}
func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
}
return size
}
func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
s := p.v.Elem()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
b = b[n:]
}
out.n = n
return out, nil
}
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
out.n = n
return out, nil
}
func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
}
var coderEnumSlice = pointerCoderFuncs{
size: sizeEnumSlice,
marshal: appendEnumSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}
func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return 0
}
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
return f.tagsize + protowire.SizeBytes(n)
}
func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return b, nil
}
b = protowire.AppendVarint(b, f.wiretag)
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
b = protowire.AppendVarint(b, uint64(n))
for i := 0; i < llen; i++ {
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
var coderEnumPackedSlice = pointerCoderFuncs{
size: sizeEnumPackedSlice,
marshal: appendEnumPackedSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}

View File

@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego && !appengine
// +build !purego,!appengine
package impl
// When using unsafe pointers, we can just treat enum values as int32s.

View File

@@ -14,7 +14,7 @@ import (
// unwrapper unwraps the value to the underlying value.
// This is implemented by List and Map.
type unwrapper interface {
protoUnwrap() interface{}
protoUnwrap() any
}
// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
// pref.Value.String never panics, so we go through an interface
// protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {

View File

@@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
func (ls *listReflect) IsValid() bool {
return !ls.v.IsNil()
}
func (ls *listReflect) protoUnwrap() interface{} {
func (ls *listReflect) protoUnwrap() any {
return ls.v.Interface()
}

View File

@@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
return v
}
func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
iter := mapRange(ms.v)
iter := ms.v.MapRange()
for iter.Next() {
k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
v := ms.valConv.PBValueOf(iter.Value())
@@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
func (ms *mapReflect) IsValid() bool {
return !ms.v.IsNil()
}
func (ms *mapReflect) protoUnwrap() interface{} {
func (ms *mapReflect) protoUnwrap() any {
return ms.v.Interface()
}

View File

@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
AllowPartial: true,
DiscardUnknown: o.DiscardUnknown(),
Resolver: o.resolver,
NoLazyDecoding: o.NoLazyDecoding(),
}
}
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
return o.flags&protoiface.UnmarshalDiscardUnknown != 0
}
func (o unmarshalOptions) IsDefault() bool {
return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 }
func (o unmarshalOptions) NoLazyDecoding() bool {
return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
}
func (o unmarshalOptions) CanBeLazy() bool {
if o.resolver != protoregistry.GlobalTypes {
return false
}
// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
}
var lazyUnmarshalOptions = unmarshalOptions{
resolver: protoregistry.GlobalTypes,
depth: protowire.DefaultRecursionLimit,
flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
depth: protowire.DefaultRecursionLimit,
}
type unmarshalOutput struct {
@@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if flags.ProtoLegacy && mi.isMessageSet {
return unmarshalMessageSet(mi, b, p, opts)
}
lazyDecoding := LazyEnabled() // default
if opts.NoLazyDecoding() {
lazyDecoding = false // explicitly disabled
}
if mi.lazyOffset.IsValid() && lazyDecoding {
return mi.unmarshalPointerLazy(b, p, groupTag, opts)
}
return mi.unmarshalPointerEager(b, p, groupTag, opts)
}
// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
// The corresponding function for Lazy is in google_lazy.go.
func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
initialized := true
var requiredMask uint64
var exts *map[int32]ExtensionField
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
}
start := len(b)
for len(b) > 0 {
// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
if f.presenceIndex != noPresence {
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
return out, errUnknown
}
if flags.LazyUnmarshalExtensions {
if opts.IsDefault() && x.canLazy(xt) {
if opts.CanBeLazy() && x.canLazy(xt) {
out, valid := skipExtension(b, xi, num, wtyp, opts)
switch valid {
case ValidationValid:
@@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
if n < 0 {
return out, ValidationUnknown
}
if opts.Validated() {
out.initialized = true
out.n = n
return out, ValidationValid
}
out, st := xi.validation.mi.validate(v, 0, opts)
out.n = n
return out, st

View File

@@ -10,7 +10,8 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
proto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/internal/protolazy"
"google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@@ -49,8 +50,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
return 0
}
if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
return int(size)
// The size cache contains the size + 1, to allow the
// zero value to be invalid, while also allowing for a
// 0 size to be cached.
if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
return int(size - 1)
}
}
return mi.sizePointerSlow(p, opts)
@@ -60,7 +64,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
if flags.ProtoLegacy && mi.isMessageSet {
size = sizeMessageSet(mi, p, opts)
if mi.sizecacheOffset.IsValid() {
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
return size
}
@@ -68,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
e := p.Apply(mi.extensionOffset).Extensions()
size += mi.sizeExtensions(e, opts)
}
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
if mi.lazyOffset.IsValid() {
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
}
}
for _, f := range mi.orderedCoderFields {
if f.funcs.size == nil {
continue
}
fptr := p.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
continue
}
if f.isLazy && fptr.AtomicGetPointer().IsNil() {
if lazyFields(opts) {
size += (*lazy).SizeField(uint32(f.num))
continue
} else {
mi.lazyUnmarshal(p, f.num)
}
}
size += f.funcs.size(fptr, f, opts)
continue
}
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -84,13 +116,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
}
}
if mi.sizecacheOffset.IsValid() {
if size > math.MaxInt32 {
if size > (math.MaxInt32 - 1) {
// The size is too large for the int32 sizecache field.
// We will need to recompute the size when encoding;
// unfortunately expensive, but better than invalid output.
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
} else {
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
// The size cache contains the size + 1, to allow the
// zero value to be invalid, while also allowing for a
// 0 size to be cached.
atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
}
return size
@@ -128,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
return b, err
}
}
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
if mi.lazyOffset.IsValid() {
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
}
}
for _, f := range mi.orderedCoderFields {
if f.funcs.marshal == nil {
continue
}
fptr := p.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
continue
}
if f.isLazy {
// Be careful, this field needs to be read atomically, like for a get
if f.isPointer && fptr.AtomicGetPointer().IsNil() {
if lazyFields(opts) {
b, _ = (*lazy).AppendField(b, uint32(f.num))
continue
} else {
mi.lazyUnmarshal(p, f.num)
}
}
b, err = f.funcs.marshal(b, fptr, f, opts)
if err != nil {
return b, err
}
continue
} else if f.isPointer && fptr.Elem().IsNil() {
continue
}
b, err = f.funcs.marshal(b, fptr, f, opts)
if err != nil {
return b, err
}
continue
}
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -149,6 +225,22 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
return b, nil
}
// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
func fullyLazyExtensions(opts marshalOptions) bool {
// When deterministic marshaling is requested, force an unmarshal for lazy
// extensions to produce a deterministic result, instead of passing through
// bytes lazily that may or may not match what Go Protobuf would produce.
return opts.flags&piface.MarshalDeterministic == 0
}
// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
func lazyFields(opts marshalOptions) bool {
// When deterministic marshaling is requested, force an unmarshal for lazy
// fields to produce a deterministic result, instead of passing through
// bytes lazily that may or may not match what Go Protobuf would produce.
return opts.flags&piface.MarshalDeterministic == 0
}
func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
if ext == nil {
return 0
@@ -158,6 +250,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
if xi.funcs.size == nil {
continue
}
if fullyLazyExtensions(opts) {
// Don't expand the extension, instead use the buffer to calculate size
if lb := x.lazyBuffer(); lb != nil {
// We got hold of the buffer, so it's still lazy.
n += len(lb)
continue
}
}
n += xi.funcs.size(x.Value(), xi.tagsize, opts)
}
return n
@@ -176,6 +276,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
var err error
for _, x := range *ext {
xi := getExtensionFieldInfo(x.Type())
if fullyLazyExtensions(opts) {
// Don't expand the extension if it's still in wire format, instead use the buffer content.
if lb := x.lazyBuffer(); lb != nil {
b = append(b, lb...)
continue
}
}
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
}
return b, err
@@ -191,6 +298,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
for _, k := range keys {
x := (*ext)[int32(k)]
xi := getExtensionFieldInfo(x.Type())
if fullyLazyExtensions(opts) {
// Don't expand the extension if it's still in wire format, instead use the buffer content.
if lb := x.lazyBuffer(); lb != nil {
b = append(b, lb...)
continue
}
}
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
if err != nil {
return b, err
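
The size-cache change above stores size + 1 in the int32 cache so that the zero value means "not computed" while a genuine size of 0 remains cacheable. A minimal sketch of the same convention, with hypothetical helper names rather than the vendored API:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// cachePut stores size+1 so that 0 keeps meaning "no cached value".
func cachePut(cache *int32, size int) {
	atomic.StoreInt32(cache, int32(size+1))
}

// cacheGet reports the cached size, if any.
func cacheGet(cache *int32) (size int, ok bool) {
	if v := atomic.LoadInt32(cache); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var cache int32
	if _, ok := cacheGet(&cache); !ok {
		fmt.Println("empty cache")
	}
	cachePut(&cache, 0)           // even a zero-length message can be cached
	fmt.Println(cacheGet(&cache)) // 0 true
}
```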

View File

@@ -0,0 +1,224 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"bytes"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
)
func equal(in protoiface.EqualInput) protoiface.EqualOutput {
return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
}
// equalMessage is a fast-path variant of protoreflect.equalMessage.
// It takes advantage of the internal messageState type to avoid
// unnecessary allocations and type assertions.
func equalMessage(mx, my protoreflect.Message) bool {
if mx == nil || my == nil {
return mx == my
}
if mx.Descriptor() != my.Descriptor() {
return false
}
msx, ok := mx.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
msy, ok := my.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi := msx.messageInfo()
miy := msy.messageInfo()
if mi != miy {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi.init()
// Compares regular fields
// Modified Message.Range code that compares two messages of the same type
// while going over the fields.
for _, ri := range mi.rangeInfos {
var fd protoreflect.FieldDescriptor
var vx, vy protoreflect.Value
switch ri := ri.(type) {
case *fieldInfo:
hx := ri.has(msx.pointer())
hy := ri.has(msy.pointer())
if hx != hy {
return false
}
if !hx {
continue
}
fd = ri.fieldDesc
vx = ri.get(msx.pointer())
vy = ri.get(msy.pointer())
case *oneofInfo:
fnx := ri.which(msx.pointer())
fny := ri.which(msy.pointer())
if fnx != fny {
return false
}
if fnx <= 0 {
continue
}
fi := mi.fields[fnx]
fd = fi.fieldDesc
vx = fi.get(msx.pointer())
vy = fi.get(msy.pointer())
}
if !equalValue(fd, vx, vy) {
return false
}
}
// Compare extensions.
// This is more complicated because mx or my could have empty/nil extension maps,
// however some populated extension map values are equal to nil extension maps.
emx := mi.extensionMap(msx.pointer())
emy := mi.extensionMap(msy.pointer())
if emx != nil {
for k, x := range *emx {
xd := x.Type().TypeDescriptor()
xv := x.Value()
var y ExtensionField
ok := false
if emy != nil {
y, ok = (*emy)[k]
}
// We need to treat empty lists as equal to nil values
if emy == nil || !ok {
if xd.IsList() && xv.List().Len() == 0 {
continue
}
return false
}
if !equalValue(xd, xv, y.Value()) {
return false
}
}
}
if emy != nil {
// emy may have extensions emx does not have, need to check them as well
for k, y := range *emy {
if emx != nil {
// emx has the field, so we already checked it
if _, ok := (*emx)[k]; ok {
continue
}
}
// Empty lists are equal to nil
if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
continue
}
// Can't be equal if the extension is populated
return false
}
}
return equalUnknown(mx.GetUnknown(), my.GetUnknown())
}
func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
// slow path
if fd.Kind() != protoreflect.MessageKind {
return vx.Equal(vy)
}
// fast path special cases
if fd.IsMap() {
if fd.MapValue().Kind() == protoreflect.MessageKind {
return equalMessageMap(vx.Map(), vy.Map())
}
return vx.Equal(vy)
}
if fd.IsList() {
return equalMessageList(vx.List(), vy.List())
}
return equalMessage(vx.Message(), vy.Message())
}
// Mostly copied from protoreflect.equalMap.
// This variant only works for messages as map types.
// All other map types should be handled via Value.Equal.
func equalMessageMap(mx, my protoreflect.Map) bool {
if mx.Len() != my.Len() {
return false
}
equal := true
mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
if !my.Has(k) {
equal = false
return false
}
vy := my.Get(k)
equal = equalMessage(vx.Message(), vy.Message())
return equal
})
return equal
}
// Mostly copied from protoreflect.equalList.
// The only change is the usage of equalImpl instead of protoreflect.equalValue.
func equalMessageList(lx, ly protoreflect.List) bool {
if lx.Len() != ly.Len() {
return false
}
for i := 0; i < lx.Len(); i++ {
// We only operate on messages here since equalImpl will not call us in any other case.
if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
return false
}
}
return true
}
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
// Copied from protoreflect.equalUnknown.
func equalUnknown(x, y protoreflect.RawFields) bool {
if len(x) != len(y) {
return false
}
if bytes.Equal([]byte(x), []byte(y)) {
return true
}
mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
x = x[n:]
}
for len(y) > 0 {
fnum, _, n := protowire.ConsumeField(y)
my[fnum] = append(my[fnum], y[:n]...)
y = y[n:]
}
if len(mx) != len(my) {
return false
}
for k, v1 := range mx {
if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
return false
}
}
return true
}
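// Note that equalUnknown first tries a cheap bytes.Equal on the raw buffers and
// only falls back to grouping the raw fields by field number when the bytes
// differ, so unknown fields with distinct numbers that were serialized in a
// different order still compare as equal. Occurrences of the same field number
// must still appear in the same relative order, since their raw bytes are
// concatenated per number before comparison.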

View File

@@ -53,7 +53,7 @@ type ExtensionInfo struct {
// type returned by InterfaceOf may not be identical.
//
// Deprecated: Use InterfaceOf(xt.Zero()) instead.
ExtensionType interface{}
ExtensionType any
// Field is the field number of the extension.
//
@@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value {
func (xi *ExtensionInfo) Zero() protoreflect.Value {
return xi.lazyInit().Zero()
}
func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
}
func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
return xi.lazyInit().GoValueOf(v).Interface()
}
func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
return xi.lazyInit().IsValidPB(v)
}
func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
func (xi *ExtensionInfo) IsValidInterface(v any) bool {
return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
}
func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {

vendor/google.golang.org/protobuf/internal/impl/lazy.go generated vendored Normal file
View File

@@ -0,0 +1,433 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"math/bits"
"os"
"reflect"
"sort"
"sync/atomic"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/protolazy"
"google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
piface "google.golang.org/protobuf/runtime/protoiface"
)
var enableLazy int32 = func() int32 {
if os.Getenv("GOPROTODEBUG") == "nolazy" {
return 0
}
return 1
}()
// EnableLazyUnmarshal enables lazy unmarshaling.
func EnableLazyUnmarshal(enable bool) {
if enable {
atomic.StoreInt32(&enableLazy, 1)
return
}
atomic.StoreInt32(&enableLazy, 0)
}
// LazyEnabled reports whether lazy unmarshalling is currently enabled.
func LazyEnabled() bool {
return atomic.LoadInt32(&enableLazy) != 0
}
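// The GOPROTODEBUG=nolazy opt-out above is read once, when this package is
// initialized, so it must already be present in the process environment when
// the binary starts (for example, GOPROTODEBUG=nolazy ./cloudflared); setting
// it later has no effect. EnableLazyUnmarshal, by contrast, can toggle the
// behavior at any point at runtime.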
// UnmarshalField unmarshals a field in a message.
func UnmarshalField(m interface{}, num protowire.Number) {
switch m := m.(type) {
case *messageState:
m.messageInfo().lazyUnmarshal(m.pointer(), num)
case *messageReflectWrapper:
m.messageInfo().lazyUnmarshal(m.pointer(), num)
default:
panic(fmt.Sprintf("unsupported wrapper type %T", m))
}
}
func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
var f *coderFieldInfo
if int(num) < len(mi.denseCoderFields) {
f = mi.denseCoderFields[num]
} else {
f = mi.coderFields[num]
}
if f == nil {
panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
}
lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
if !found && multipleEntries == nil {
panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
}
// The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races.
// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
fp := pointerOfValue(reflect.New(f.ft))
if multipleEntries != nil {
for _, entry := range multipleEntries {
mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
}
} else {
mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
}
p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
}
func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
opts := lazyUnmarshalOptions
opts.flags |= flags
for len(b) > 0 {
// Parse the tag (field number and wire type).
var tag uint64
if b[0] < 0x80 {
tag = uint64(b[0])
b = b[1:]
} else if len(b) >= 2 && b[1] < 128 {
tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
b = b[2:]
} else {
var n int
tag, n = protowire.ConsumeVarint(b)
if n < 0 {
return errors.New("invalid wire data")
}
b = b[n:]
}
var num protowire.Number
if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
return errors.New("invalid wire data")
} else {
num = protowire.Number(n)
}
wtyp := protowire.Type(tag & 7)
if num == f.num {
o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
if err == nil {
b = b[o.n:]
continue
}
if err != errUnknown {
return err
}
}
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return errors.New("invalid wire data")
}
b = b[n:]
}
return nil
}
func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
fmi := f.validation.mi
if fmi == nil {
fd := mi.Desc.Fields().ByNumber(f.num)
if fd == nil {
return out, ValidationUnknown
}
messageName := fd.Message().FullName()
messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
if err != nil {
return out, ValidationUnknown
}
var ok bool
fmi, ok = messageType.(*MessageInfo)
if !ok {
return out, ValidationUnknown
}
}
fmi.init()
switch f.validation.typ {
case validationTypeMessage:
if wtyp != protowire.BytesType {
return out, ValidationWrongWireType
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, ValidationInvalid
}
out, st := fmi.validate(v, 0, opts)
out.n = n
return out, st
case validationTypeGroup:
if wtyp != protowire.StartGroupType {
return out, ValidationWrongWireType
}
out, st := fmi.validate(b, f.num, opts)
return out, st
default:
return out, ValidationUnknown
}
}
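// skipField is used during lazy decoding to validate a message- or group-typed
// field without materializing it: if validation succeeds, the field's bytes are
// left in the lazy buffer for later, and if the result is ValidationUnknown the
// caller falls back to eager unmarshaling, either immediately or in a final
// pass after the message has been scanned.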
// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
// specifically handles lazy unmarshaling. It expects lazyOffset and
// presenceOffset to both be valid.
func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
initialized := true
var requiredMask uint64
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
var lazyIndex []protolazy.IndexEntry
var lastNum protowire.Number
outOfOrder := false
lazyDecode := false
presence = p.Apply(mi.presenceOffset).PresenceInfo()
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
if !presence.AnyPresent(mi.presenceSize) {
if opts.CanBeLazy() {
// If the message contains existing data, we need to merge into it.
// Lazy unmarshaling doesn't merge, so only enable it when the
// message is empty (no presence bits are set).
lazyDecode = true
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
(*lazy).SetUnmarshalFlags(opts.flags)
if !opts.AliasBuffer() {
// Make a copy of the buffer for lazy unmarshaling.
// Set the AliasBuffer flag so recursive unmarshal
// operations reuse the copy.
b = append([]byte{}, b...)
opts.flags |= piface.UnmarshalAliasBuffer
}
(*lazy).SetBuffer(b)
}
}
// Track special handling of lazy fields.
//
// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
// In the event that validation for a field fails, this map tracks handling of the field.
type lazyAction uint8
const (
lazyValidateOnly lazyAction = iota // validate the field only
lazyUnmarshalNow // eagerly unmarshal the field
lazyUnmarshalLater // unmarshal the field after the message is fully processed
)
var lazyFields map[*coderFieldInfo]lazyAction
var exts *map[int32]ExtensionField
start := len(b)
pos := 0
for len(b) > 0 {
// Parse the tag (field number and wire type).
var tag uint64
if b[0] < 0x80 {
tag = uint64(b[0])
b = b[1:]
} else if len(b) >= 2 && b[1] < 128 {
tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
b = b[2:]
} else {
var n int
tag, n = protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
b = b[n:]
}
var num protowire.Number
if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
return out, errors.New("invalid field number")
} else {
num = protowire.Number(n)
}
wtyp := protowire.Type(tag & 7)
if wtyp == protowire.EndGroupType {
if num != groupTag {
return out, errors.New("mismatching end group marker")
}
groupTag = 0
break
}
var f *coderFieldInfo
if int(num) < len(mi.denseCoderFields) {
f = mi.denseCoderFields[num]
} else {
f = mi.coderFields[num]
}
var n int
err := errUnknown
discardUnknown := false
Field:
switch {
case f != nil:
if f.funcs.unmarshal == nil {
break
}
if f.isLazy && lazyDecode {
switch {
case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
// Attempt to validate this field and leave it for later lazy unmarshaling.
o, valid := mi.skipField(b, f, wtyp, opts)
switch valid {
case ValidationValid:
// Skip over the valid field and continue.
err = nil
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
requiredMask |= f.validation.requiredBit
if !o.initialized {
initialized = false
}
n = o.n
break Field
case ValidationInvalid:
return out, errors.New("invalid proto wire format")
case ValidationWrongWireType:
break Field
case ValidationUnknown:
if lazyFields == nil {
lazyFields = make(map[*coderFieldInfo]lazyAction)
}
if presence.Present(f.presenceIndex) {
// We were unable to determine if the field is valid or not,
// and we've already skipped over at least one instance of this
// field. Clear the presence bit (so if we stop decoding early,
// we don't leave a partially-initialized field around) and flag
// the field for unmarshaling before we return.
presence.ClearPresent(f.presenceIndex)
lazyFields[f] = lazyUnmarshalLater
discardUnknown = true
break Field
} else {
// We were unable to determine if the field is valid or not,
// but this is the first time we've seen it. Flag it as needing
// eager unmarshaling and fall through to the eager unmarshal case below.
lazyFields[f] = lazyUnmarshalNow
}
}
case lazyFields[f] == lazyUnmarshalLater:
// This field will be unmarshaled in a separate pass below.
// Skip over it here.
discardUnknown = true
break Field
default:
// Eagerly unmarshal the field.
}
}
if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
if p.Apply(f.offset).AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(p, f.num)
}
}
var o unmarshalOutput
o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
n = o.n
if err != nil {
break
}
requiredMask |= f.validation.requiredBit
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
if f.presenceIndex != noPresence {
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
exts = p.Apply(mi.extensionOffset).Extensions()
if *exts == nil {
*exts = make(map[int32]ExtensionField)
}
}
if exts == nil {
break
}
var o unmarshalOutput
o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
if err != nil {
break
}
n = o.n
if !o.initialized {
initialized = false
}
}
if err != nil {
if err != errUnknown {
return out, err
}
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return out, errDecode
}
if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
u := mi.mutableUnknownBytes(p)
*u = protowire.AppendTag(*u, num, wtyp)
*u = append(*u, b[:n]...)
}
}
b = b[n:]
end := start - len(b)
if lazyDecode && f != nil && f.isLazy {
if num != lastNum {
lazyIndex = append(lazyIndex, protolazy.IndexEntry{
FieldNum: uint32(num),
Start: uint32(pos),
End: uint32(end),
})
} else {
i := len(lazyIndex) - 1
lazyIndex[i].End = uint32(end)
lazyIndex[i].MultipleContiguous = true
}
}
if num < lastNum {
outOfOrder = true
}
pos = end
lastNum = num
}
if groupTag != 0 {
return out, errors.New("missing end group marker")
}
if lazyFields != nil {
// Some fields failed validation, and now need to be unmarshaled.
for f, action := range lazyFields {
if action != lazyUnmarshalLater {
continue
}
initialized = false
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
return out, err
}
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
}
if lazyDecode {
if outOfOrder {
sort.Slice(lazyIndex, func(i, j int) bool {
return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
lazyIndex[i].Start < lazyIndex[j].Start)
})
}
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
(*lazy).SetIndex(lazyIndex)
}
if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
initialized = false
}
if initialized {
out.initialized = true
}
out.n = start - len(b)
return out, nil
}

View File

@@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
return e
}
func (e *legacyEnumWrapper) protoUnwrap() interface{} {
func (e *legacyEnumWrapper) protoUnwrap() any {
v := reflect.New(e.goTyp).Elem()
v.SetInt(int64(e.num))
return v.Interface()

View File

@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }

View File

@@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
if vs, ok := v.Interface().([]interface{}); ok {
if vs, ok := v.Interface().([]any); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
@@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Parent = md
fd.L0.Index = n
if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
if fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
if fd.L1.EditionFeatures.IsPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
@@ -567,6 +564,6 @@ func (m aberrantMessage) IsValid() bool {
func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
return aberrantProtoMethods
}
func (m aberrantMessage) protoUnwrap() interface{} {
func (m aberrantMessage) protoUnwrap() any {
return m.v.Interface()
}

View File

@@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
if src.IsNil() {
return
}
var presenceSrc presence
var presenceDst presence
if mi.presenceOffset.IsValid() {
presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
}
for _, f := range mi.orderedCoderFields {
if f.funcs.merge == nil {
continue
}
sfptr := src.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presenceSrc.Present(f.presenceIndex) {
continue
}
dfptr := dst.Apply(f.offset)
if f.isLazy {
if sfptr.AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(src, f.num)
}
if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(dst, f.num)
}
}
f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
continue
}
if f.isPointer && sfptr.Elem().IsNil() {
continue
}

View File

@@ -14,7 +14,6 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -30,12 +29,12 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
// Exporter must be provided in a purego environment in order to provide
// access to unexported fields.
// Deprecated: Exporter will be removed the next time we bump
// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
OneofWrappers []interface{}
OneofWrappers []any
initMu sync.Mutex // protects all unexported fields
initDone uint32
@@ -47,7 +46,7 @@ type MessageInfo struct {
// exporter is a function that returns a reference to the ith field of v,
// where v is a pointer to a struct. It returns nil if it does not support
// exporting the requested field (e.g., already exported).
type exporter func(v interface{}, i int) interface{}
type exporter func(v any, i int) any
// getMessageInfo returns the MessageInfo for any message type that
// is generated by our implementation of protoc-gen-go (for v2 and on).
@@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() {
if mi.initDone == 1 {
return
}
if opaqueInitHook(mi) {
return
}
t := mi.GoReflectType
if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -117,7 +119,6 @@ type (
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
weakFieldsType = reflect.TypeOf(WeakFields(nil))
unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -126,13 +127,14 @@ var (
type structInfo struct {
sizecacheOffset offset
sizecacheType reflect.Type
weakOffset offset
weakType reflect.Type
unknownOffset offset
unknownType reflect.Type
extensionOffset offset
extensionType reflect.Type
lazyOffset offset
presenceOffset offset
fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField
oneofsByName map[protoreflect.Name]reflect.StructField
oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber
@@ -142,9 +144,10 @@ type structInfo struct {
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
si := structInfo{
sizecacheOffset: invalidOffset,
weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
lazyOffset: invalidOffset,
presenceOffset: invalidOffset,
fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{},
oneofsByName: map[protoreflect.Name]reflect.StructField{},
@@ -157,24 +160,23 @@ fieldLoop:
switch f := t.Field(i); f.Name {
case genid.SizeCache_goname, genid.SizeCacheA_goname:
if f.Type == sizecacheType {
si.sizecacheOffset = offsetOf(f, mi.Exporter)
si.sizecacheOffset = offsetOf(f)
si.sizecacheType = f.Type
}
case genid.WeakFields_goname, genid.WeakFieldsA_goname:
if f.Type == weakFieldsType {
si.weakOffset = offsetOf(f, mi.Exporter)
si.weakType = f.Type
}
case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
si.unknownOffset = offsetOf(f, mi.Exporter)
si.unknownOffset = offsetOf(f)
si.unknownType = f.Type
}
case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
if f.Type == extensionFieldsType {
si.extensionOffset = offsetOf(f, mi.Exporter)
si.extensionOffset = offsetOf(f)
si.extensionType = f.Type
}
case "lazyFields", "XXX_lazyUnmarshalInfo":
si.lazyOffset = offsetOf(f)
case "XXX_presence":
si.presenceOffset = offsetOf(f)
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
@@ -201,7 +203,7 @@ fieldLoop:
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
if vs, ok := v.Interface().([]interface{}); ok {
if vs, ok := v.Interface().([]any); ok {
oneofWrappers = vs
}
}
@@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
mi.init()
fd := mi.Desc.Fields().Get(i)
switch {
case fd.IsWeak():
mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
default:
@@ -256,7 +255,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
type mapEntryType struct {
desc protoreflect.MessageDescriptor
valType interface{} // zero value of enum or message type
valType any // zero value of enum or message type
}
func (mt mapEntryType) New() protoreflect.Message {

View File

@@ -0,0 +1,627 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"math"
"reflect"
"strings"
"sync/atomic"
"google.golang.org/protobuf/reflect/protoreflect"
)
type opaqueStructInfo struct {
structInfo
}
// isOpaque determines whether a protobuf message type is on the Opaque API. It
// checks whether the type is a Go struct that protoc-gen-go would generate.
//
// This function only detects newly generated messages from the v2
// implementation of protoc-gen-go. It is unable to classify generated messages
// that are too old or those that are generated by a different generator
// such as protoc-gen-gogo.
func isOpaque(t reflect.Type) bool {
// The current detection mechanism is to simply check the first field
// for a struct tag with the "protogen" key.
if t.Kind() == reflect.Struct && t.NumField() > 0 {
pgt := t.Field(0).Tag.Get("protogen")
return strings.HasPrefix(pgt, "opaque.")
}
return false
}
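// For illustration, the first field of an Opaque API message is expected to
// look roughly like the sketch below; the field name and exact tag value are
// assumptions, and only the "opaque." prefix of the protogen tag matters to
// the check above:
//
//	type SomeMessage struct {
//		state protoimpl.MessageState `protogen:"opaque.v1"`
//		// remaining fields are unexported and accessed via generated methods
//	}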
func opaqueInitHook(mi *MessageInfo) bool {
mt := mi.GoReflectType.Elem()
si := opaqueStructInfo{
structInfo: mi.makeStructInfo(mt),
}
if !isOpaque(mt) {
return false
}
defer atomic.StoreUint32(&mi.initDone, 1)
mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
fds := mi.Desc.Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
fs := si.fieldsByNumber[fd.Number()]
var fi fieldInfo
usePresence, _ := usePresenceForField(si, fd)
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
// Oneofs are no different for opaque.
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
case fd.IsMap():
fi = mi.fieldInfoForMapOpaque(si, fd, fs)
case fd.IsList() && fd.Message() == nil && usePresence:
fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
case fd.IsList() && fd.Message() == nil:
// Proto3 lists without presence can use the same access methods as the open API.
fi = fieldInfoForList(fd, fs, mi.Exporter)
case fd.IsList() && usePresence:
fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
case fd.IsList():
// Proto3 opaque message lists that do not need a presence bitmap.
// Different representation than the open struct, but same logic.
fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
case fd.Message() != nil && usePresence:
fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
case fd.Message() != nil:
// Proto3 messages without presence can use the same access methods as the open API.
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
}
mi.fields[fd.Number()] = &fi
}
mi.oneofs = map[protoreflect.Name]*oneofInfo{}
for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
od := mi.Desc.Oneofs().Get(i)
mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
}
mi.denseFields = make([]*fieldInfo, fds.Len()*2)
for i := 0; i < fds.Len(); i++ {
if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
}
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
i += od.Fields().Len()
} else {
mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
i++
}
}
mi.makeExtensionFieldsFunc(mt, si.structInfo)
mi.makeUnknownFieldsFunc(mt, si.structInfo)
mi.makeOpaqueCoderMethods(mt, si)
mi.makeFieldTypes(si.structInfo)
return true
}
func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fd := od.Fields().Get(0)
index, _ := presenceIndex(mi.Desc, fd)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
}
if !mi.present(p, index) {
return 0
}
return od.Fields().Get(0).Number()
}
return oi
}
// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
return makeOneofInfo(od, si, x)
}
func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
}
fieldOffset := offsetOf(fs)
conv := NewConverter(ft, fd)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
// Don't bother checking presence bits, since we need to
// look at the map length even if the presence bit is set.
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return rv.Len() > 0
},
clear: func(p pointer) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
pv := conv.GoValueOf(v)
if pv.IsNil() {
panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(pv)
},
mutable: func(p pointer) protoreflect.Value {
v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if v.IsNil() {
v.Set(reflect.MakeMap(fs.Type))
}
return conv.PBValueOf(v)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(reflect.PtrTo(ft), fd)
fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return rv.Len() > 0
},
clear: func(p pointer) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
pv := conv.GoValueOf(v)
if pv.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
}
mi.setPresent(p, index)
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(pv.Elem())
},
mutable: func(p pointer) protoreflect.Value {
mi.setPresent(p, index)
return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
if !mi.present(p, index) {
return false
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
}
rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
rv := sp.AsValueOf(fs.Type.Elem())
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
if !mi.present(p, index) {
return conv.Zero()
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
}
rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
rv := sp.AsValueOf(fs.Type.Elem())
val := conv.GoValueOf(v)
if val.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
} else {
rv.Elem().Set(val.Elem())
}
},
mutable: func(p pointer) protoreflect.Value {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
if mi.present(p, index) {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
} else {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
}
rv := sp.AsValueOf(fs.Type.Elem())
return conv.PBValueOf(rv)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
return false
}
rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
sp := p.Apply(fieldOffset).AtomicGetPointer()
if !sp.IsNil() {
rv := sp.AsValueOf(fs.Type.Elem())
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
}
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
return conv.Zero()
}
rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() {
rv.Set(reflect.New(fs.Type.Elem()))
}
val := conv.GoValueOf(v)
if val.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
} else {
rv.Elem().Set(val.Elem())
}
},
mutable: func(p pointer) protoreflect.Value {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() {
rv.Set(reflect.New(fs.Type.Elem()))
}
return conv.PBValueOf(rv)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
nullable := fd.HasPresence()
if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
nullable = true
}
deref := false
if nullable && ft.Kind() == reflect.Ptr {
ft = ft.Elem()
deref = true
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
var getter func(p pointer) protoreflect.Value
if !nullable {
getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
} else {
getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
}
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
if nullable {
return mi.present(p, index)
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
case reflect.Int32, reflect.Int64:
return rv.Int() != 0
case reflect.Uint32, reflect.Uint64:
return rv.Uint() != 0
case reflect.Float32, reflect.Float64:
return rv.Float() != 0 || math.Signbit(rv.Float())
case reflect.String, reflect.Slice:
return rv.Len() > 0
default:
panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
}
},
clear: func(p pointer) {
if nullable {
mi.clearPresent(p, index)
}
// This is only valuable for bytes and strings, but we do it unconditionally.
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: getter,
// TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if deref {
if rv.IsNil() {
rv.Set(reflect.New(ft))
}
rv = rv.Elem()
}
rv.Set(conv.GoValueOf(v))
if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
rv.Set(emptyBytes)
}
if nullable {
mi.setPresent(p, index)
}
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
elemType := fs.Type.Elem()
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
return mi.present(p, index)
},
clear: func(p pointer) {
mi.clearPresent(p, index)
p.Apply(fieldOffset).AtomicSetNilPointer()
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
fp := p.Apply(fieldOffset)
mp := fp.AtomicGetPointer()
if mp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
mp = fp.AtomicGetPointer()
}
rv := mp.AsValueOf(elemType)
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
val := pointerOfValue(conv.GoValueOf(v))
if val.IsNil() {
panic("invalid nil pointer")
}
p.Apply(fieldOffset).AtomicSetPointer(val)
mi.setPresent(p, index)
},
mutable: func(p pointer) protoreflect.Value {
fp := p.Apply(fieldOffset)
mp := fp.AtomicGetPointer()
if mp.IsNil() {
if mi.present(p, index) {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
mp = fp.AtomicGetPointer()
} else {
mp = pointerOfValue(conv.GoValueOf(conv.New()))
fp.AtomicSetPointer(mp)
mi.setPresent(p, index)
}
}
return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
},
newMessage: func() protoreflect.Message {
return conv.New().Message()
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
// A presenceList wraps a List, updating presence bits as necessary when the
// list contents change.
type presenceList struct {
pvalueList
setPresence func(bool)
}
type pvalueList interface {
protoreflect.List
//Unwrapper
}
func (list presenceList) Append(v protoreflect.Value) {
list.pvalueList.Append(v)
list.setPresence(true)
}
func (list presenceList) Truncate(i int) {
list.pvalueList.Truncate(i)
list.setPresence(i > 0)
}
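// presenceList keeps the presence bit in sync with the wrapped list: appending
// an element marks the field present, and truncating the list to length zero
// clears the bit again, so an emptied repeated field no longer reports as set.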
// presenceIndex returns the index to pass to presence functions.
//
// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
found := false
var index, numIndices uint32
for i := 0; i < md.Fields().Len(); i++ {
f := md.Fields().Get(i)
if f == fd {
found = true
index = numIndices
}
if f.ContainingOneof() == nil || isLastOneofField(f) {
numIndices++
}
}
if !found {
panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
}
return index, presenceSize(numIndices)
}
func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
fields := fd.ContainingOneof().Fields()
return fields.Get(fields.Len()-1) == fd
}
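// As a worked example of presenceIndex: for a message with fields a = 1 and
// b = 2 inside oneof "o", followed by a scalar c = 3, the oneof consumes a
// single slot, so a and b both map to index 0, c maps to index 1, and the
// returned presenceSize is 2.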
func (mi *MessageInfo) setPresent(p pointer, index uint32) {
p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
}
func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
}
func (mi *MessageInfo) present(p pointer, index uint32) bool {
return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
}
// usePresenceForField implements the somewhat intricate logic of when
// the presence bitmap is used for a field. The main logic is that a
// field that is optional or that can be lazy will use the presence
// bit; for proto2, maps also have a presence bit. It also records
// if the field can ever be lazy, which is true if we have a
// lazyOffset and the field is a message or a slice of messages. A
// field that is lazy will always need a presence bit. Oneofs are not
// lazy and do not use presence, unless they are a synthetic oneof,
// which is a proto3 optional field. For proto3 optionals, we use the
// presence and they can also be lazy when applicable (a message).
func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
// Non-oneof scalar fields with explicit field presence use the presence array.
usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
return false, false
case fd.IsMap():
return false, false
case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
return hasLazyField, hasLazyField
default:
return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
}
}
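// For example, a proto3 "optional string" field (a synthetic oneof) uses the
// presence array but can never be lazy, while a message field marked lazy uses
// a presence bit and is only decoded on first access; members of a real oneof
// and map fields never use the presence array.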

View File

@@ -0,0 +1,132 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package impl
import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if fd.Kind() == protoreflect.EnumKind {
// Enums for nullable opaque types.
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return conv.PBValueOf(rv)
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bool()
return protoreflect.ValueOfBool(*x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32()
return protoreflect.ValueOfInt32(*x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32()
return protoreflect.ValueOfUint32(*x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64()
return protoreflect.ValueOfInt64(*x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64()
return protoreflect.ValueOfUint64(*x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32()
return protoreflect.ValueOfFloat32(*x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64()
return protoreflect.ValueOfFloat64(*x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
if len(**x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(**x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfString(**x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}

View File

@@ -20,7 +20,7 @@ type reflectMessageInfo struct {
// fieldTypes contains the zero value of an enum or message field.
// For lists, it contains the element type.
// For maps, it contains the entry value type.
fieldTypes map[protoreflect.FieldNumber]interface{}
fieldTypes map[protoreflect.FieldNumber]any
// denseFields is a subset of fields where:
// 0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@ type reflectMessageInfo struct {
denseFields []*fieldInfo
// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
rangeInfos []interface{} // either *fieldInfo or *oneofInfo
rangeInfos []any // either *fieldInfo or *oneofInfo
getUnknown func(pointer) protoreflect.RawFields
setUnknown func(pointer, protoreflect.RawFields)
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
case fd.IsWeak():
fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
@@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
case fd.IsList():
if fd.Enum() != nil || fd.Message() != nil {
ft = fs.Type.Elem()
if ft.Kind() == reflect.Slice {
ft = ft.Elem()
}
}
isMessage = fd.Message() != nil
case fd.Enum() != nil:
@@ -214,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
case fd.Message() != nil:
ft = fs.Type
if fd.IsWeak() {
ft = nil
}
isMessage = true
}
if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
if ft != nil {
if mi.fieldTypes == nil {
mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
}
mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
}
@@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
if !ok {
return false
}
if x.isUnexpandedLazy() {
// Avoid calling x.Value(), which triggers a lazy unmarshal.
return true
}
switch {
case xd.IsList():
return x.Value().List().Len() > 0
@@ -389,7 +393,7 @@ var (
// MessageOf returns a reflective view over a message. The input must be a
// pointer to a named Go struct. If the provided type has a ProtoReflect method,
// it must be implemented by calling this method.
func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
if reflect.TypeOf(m) != mi.GoReflectType {
panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
}
@@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() {
func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
return (*messageReflectWrapper)(m)
}
func (m *messageIfaceWrapper) protoUnwrap() interface{} {
func (m *messageIfaceWrapper) protoUnwrap() any {
return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
}

View File

@@ -8,11 +8,8 @@ import (
"fmt"
"math"
"reflect"
"sync"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
@@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
isMessage := fd.Message() != nil
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
return fieldInfo{
// NOTE: The logic below intentionally assumes that oneof fields are
// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(reflect.PtrTo(ft), fd)
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
ft := fs.Type
nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
var getter func(p pointer) protoreflect.Value
if nullable {
if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
// This never occurs for generated message types.
@@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs)
// Generate specialized getter functions to avoid going through reflect.Value
if nullable {
getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
} else {
getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
}
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
return !rv.IsNil()
return !p.Apply(fieldOffset).Elem().IsNil()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
@@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
if rv.IsNil() {
return conv.Zero()
}
if rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
}
return conv.PBValueOf(rv)
},
get: getter,
// TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable && rv.Kind() == reflect.Ptr {
@@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
if !flags.ProtoLegacy {
panic("no support for proto1 weak fields")
}
var once sync.Once
var messageType protoreflect.MessageType
lazyInit := func() {
once.Do(func() {
messageName := fd.Message().FullName()
messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
if messageType == nil {
panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
}
})
}
num := fd.Number()
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
_, ok := p.Apply(weakOffset).WeakFields().get(num)
return ok
},
clear: func(p pointer) {
p.Apply(weakOffset).WeakFields().clear(num)
},
get: func(p pointer) protoreflect.Value {
lazyInit()
if p.IsNil() {
return protoreflect.ValueOfMessage(messageType.Zero())
}
m, ok := p.Apply(weakOffset).WeakFields().get(num)
if !ok {
return protoreflect.ValueOfMessage(messageType.Zero())
}
return protoreflect.ValueOfMessage(m.ProtoReflect())
},
set: func(p pointer, v protoreflect.Value) {
lazyInit()
m := v.Message()
if m.Descriptor() != messageType.Descriptor() {
if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
}
panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
}
p.Apply(weakOffset).WeakFields().set(num, m.Interface())
},
mutable: func(p pointer) protoreflect.Value {
lazyInit()
fs := p.Apply(weakOffset).WeakFields()
m, ok := fs.get(num)
if !ok {
m = messageType.New().Interface()
fs.set(num, m)
}
return protoreflect.ValueOfMessage(m.ProtoReflect())
},
newMessage: func() protoreflect.Message {
lazyInit()
return messageType.New()
},
newField: func() protoreflect.Value {
lazyInit()
return protoreflect.ValueOfMessage(messageType.New())
},
}
}
func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if fs.Type.Kind() != reflect.Ptr {
return !isZero(rv)
return !rv.IsZero()
}
return !rv.IsNil()
},
@@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
} else {
fs := si.oneofsByName[od.Name()]
fieldOffset := offsetOf(fs, x)
fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
return oi
}
// isZero is identical to reflect.Value.IsZero.
// TODO: Remove this when Go1.13 is the minimally supported Go version.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return math.Float64bits(v.Float()) == 0
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !isZero(v.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
return v.IsNil()
case reflect.String:
return v.Len() == 0
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !isZero(v.Field(i)) {
return false
}
}
return true
default:
panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
}
}

View File

@@ -0,0 +1,273 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package impl
import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if fd.Kind() == protoreflect.EnumKind {
elemType := fs.Type.Elem()
// Enums for nullable types.
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
if rv.IsNil() {
return conv.Zero()
}
return conv.PBValueOf(rv.Elem())
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).BoolPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfBool(**x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfInt32(**x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfUint32(**x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfInt64(**x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfUint64(**x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfFloat32(**x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfFloat64(**x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
if len(**x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(**x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfString(**x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
if len(*x) == 0 {
return conv.Zero()
}
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}
func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if fd.Kind() == protoreflect.EnumKind {
// Enums for non-nullable types.
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return conv.PBValueOf(rv)
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bool()
return protoreflect.ValueOfBool(*x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32()
return protoreflect.ValueOfInt32(*x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32()
return protoreflect.ValueOfUint32(*x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64()
return protoreflect.ValueOfInt64(*x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64()
return protoreflect.ValueOfUint64(*x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32()
return protoreflect.ValueOfFloat32(*x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64()
return protoreflect.ValueOfFloat64(*x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).String()
if len(*x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).String()
return protoreflect.ValueOfString(*x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}
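// exampleDirectCount is an illustrative sketch (not part of the upstream
// file): direct (non-nullable) fields are stored by value, so the getter
// always reads the field in place and "unset" is indistinguishable from the
// zero value; presence, where needed, is tracked in the separate presence
// bitmap. The message type and helper are hypothetical.
type exampleDirectMsg struct{ Count int32 }

func exampleDirectCount(m *exampleDirectMsg) int32 {
	if m == nil {
		return 0 // nil message: analogue of conv.Zero()
	}
	return m.Count // read in place, no pointer indirection
}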

View File

@@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message {
func (m *messageState) Interface() protoreflect.ProtoMessage {
return m.protoUnwrap().(protoreflect.ProtoMessage)
}
func (m *messageState) protoUnwrap() interface{} {
func (m *messageState) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageState) ProtoMethods() *protoiface.Methods {
@@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
}
return (*messageIfaceWrapper)(m)
}
func (m *messageReflectWrapper) protoUnwrap() interface{} {
func (m *messageReflectWrapper) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {

View File

@@ -1,215 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego || appengine
// +build purego appengine
package impl
import (
"fmt"
"reflect"
"sync"
)
const UnsafeEnabled = false
// Pointer is an opaque pointer type.
type Pointer interface{}
// offset represents the offset to a struct field, accessible from a pointer.
// The offset is the field index into a struct.
type offset struct {
index int
export exporter
}
// offsetOf returns a field offset for the struct field.
func offsetOf(f reflect.StructField, x exporter) offset {
if len(f.Index) != 1 {
panic("embedded structs are not supported")
}
if f.PkgPath == "" {
return offset{index: f.Index[0]} // field is already exported
}
if x == nil {
panic("exporter must be provided for unexported field")
}
return offset{index: f.Index[0], export: x}
}
// IsValid reports whether the offset is valid.
func (f offset) IsValid() bool { return f.index >= 0 }
// invalidOffset is an invalid field offset.
var invalidOffset = offset{index: -1}
// zeroOffset is a noop when calling pointer.Apply.
var zeroOffset = offset{index: 0}
// pointer is an abstract representation of a pointer to a struct or field.
type pointer struct{ v reflect.Value }
// pointerOf returns p as a pointer.
func pointerOf(p Pointer) pointer {
return pointerOfIface(p)
}
// pointerOfValue returns v as a pointer.
func pointerOfValue(v reflect.Value) pointer {
return pointer{v: v}
}
// pointerOfIface returns the pointer portion of an interface.
func pointerOfIface(v interface{}) pointer {
return pointer{v: reflect.ValueOf(v)}
}
// IsNil reports whether the pointer is nil.
func (p pointer) IsNil() bool {
return p.v.IsNil()
}
// Apply adds an offset to the pointer to derive a new pointer
// to a specified field. The current pointer must be pointing at a struct.
func (p pointer) Apply(f offset) pointer {
if f.export != nil {
if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
return pointer{v: v}
}
}
return pointer{v: p.v.Elem().Field(f.index).Addr()}
}
// AsValueOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
if got := p.v.Type().Elem(); got != t {
panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
}
return p.v
}
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
return p.AsValueOf(t).Interface()
}
func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
func (p pointer) String() *string { return p.v.Interface().(*string) }
func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
func (p pointer) Extensions() *map[int32]ExtensionField {
return p.v.Interface().(*map[int32]ExtensionField)
}
func (p pointer) Elem() pointer {
return pointer{v: p.v.Elem()}
}
// PointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) PointerSlice() []pointer {
// TODO: reconsider this
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
// AppendPointerSlice appends v to p, which must be a []*T.
func (p pointer) AppendPointerSlice(v pointer) {
sp := p.v.Elem()
sp.Set(reflect.Append(sp, v.v))
}
// SetPointer sets *p to v.
func (p pointer) SetPointer(v pointer) {
p.v.Elem().Set(v.v)
}
func growSlice(p pointer, addCap int) {
// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
in := p.v.Elem()
out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
reflect.Copy(out, in)
p.v.Elem().Set(out)
}
func (p pointer) growBoolSlice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growInt32Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growUint32Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growInt64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growUint64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growFloat64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growFloat32Slice(addCap int) {
growSlice(p, addCap)
}
func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
func (ms *messageState) pointer() pointer { panic("not supported") }
func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
type atomicNilMessage struct {
once sync.Once
m messageReflectWrapper
}
func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
m.once.Do(func() {
m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
m.m.mi = mi
})
return &m.m
}

View File

@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego && !appengine
// +build !purego,!appengine
package impl
import (
"reflect"
"sync/atomic"
"unsafe"
"google.golang.org/protobuf/internal/protolazy"
)
const UnsafeEnabled = true
@@ -23,7 +22,7 @@ type Pointer unsafe.Pointer
type offset uintptr
// offsetOf returns a field offset for the struct field.
func offsetOf(f reflect.StructField, x exporter) offset {
func offsetOf(f reflect.StructField) offset {
return offset(f.Offset)
}
@@ -50,7 +49,7 @@ func pointerOfValue(v reflect.Value) pointer {
}
// pointerOfIface returns the pointer portion of an interface.
func pointerOfIface(v interface{}) pointer {
func pointerOfIface(v any) pointer {
type ifaceHeader struct {
Type unsafe.Pointer
Data unsafe.Pointer
@@ -80,7 +79,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
func (p pointer) AsIfaceOf(t reflect.Type) any {
// TODO: Use tricky unsafe magic to directly create ifaceHeader.
return p.AsValueOf(t).Interface()
}
@@ -112,8 +111,14 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
}
func (p pointer) PresenceInfo() presence {
return presence{P: p.p}
}
func (p pointer) Elem() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}

View File

@@ -0,0 +1,42 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"sync/atomic"
"unsafe"
)
func (p pointer) AtomicGetPointer() pointer {
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
func (p pointer) AtomicSetPointer(v pointer) {
atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
}
func (p pointer) AtomicSetNilPointer() {
atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
}
func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
return v
}
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
type atomicV1MessageInfo struct{ p Pointer }
func (mi *atomicV1MessageInfo) Get() Pointer {
return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
}
func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
return p
}
return mi.Get()
}
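// exampleSetIfNil is an illustrative sketch (not part of the upstream file)
// of the compare-and-swap pattern used by AtomicSetPointerIfNil and SetIfNil
// above: the first caller to publish a non-nil pointer wins, and every later
// caller observes that same pointer.
func exampleSetIfNil(slot *unsafe.Pointer, candidate unsafe.Pointer) unsafe.Pointer {
	if atomic.CompareAndSwapPointer(slot, nil, candidate) {
		return candidate // we published our value
	}
	return atomic.LoadPointer(slot) // another goroutine published first
}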

View File

@@ -0,0 +1,142 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"sync/atomic"
"unsafe"
)
// presenceSize represents the size of a presence set, which should be the largest index of the set+1
type presenceSize uint32
// presence is the internal representation of the bitmap array in a generated protobuf
type presence struct {
// This is a pointer to the beginning of an array of uint32
P unsafe.Pointer
}
func (p presence) toElem(num uint32) (ret *uint32) {
const (
bitsPerByte = 8
siz = unsafe.Sizeof(*ret)
)
// p.P points to an array of uint32; num is the bit in this array that the
// caller wants to check/manipulate. Calculate the index in the array that
// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
offset := uintptr(num) / (siz * bitsPerByte) * siz
return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
}
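// examplePresenceBit is an illustrative sketch (not part of the upstream
// file) of the same arithmetic in plain, non-atomic Go: field number num maps
// to word num/32 of the bitmap and to bit num%32 within that word, so field
// 76 lives in word 2, bit 12.
func examplePresenceBit(bitmap []uint32, num uint32) bool {
	word, bit := num/32, num%32
	return int(word) < len(bitmap) && bitmap[word]&(1<<bit) != 0
}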
// Present checks for the presence of a specific field number in a presence set.
func (p presence) Present(num uint32) bool {
if p.P == nil {
return false
}
return Export{}.Present(p.toElem(num), num)
}
// SetPresent adds presence for a specific field number in a presence set.
func (p presence) SetPresent(num uint32, size presenceSize) {
Export{}.SetPresent(p.toElem(num), num, uint32(size))
}
// SetPresentUnatomic adds presence for a specific field number in a presence set without using
// atomic operations. Only to be called during unmarshaling.
func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
}
// ClearPresent removes presence for a specific field number in a presence set.
func (p presence) ClearPresent(num uint32) {
Export{}.ClearPresent(p.toElem(num), num)
}
// LoadPresenceCache (together with PresentInCache) allows for a
// cached version of checking for presence without re-reading the word
// for every field. It is optimized for efficiency and assumes no
// simultaneous mutation of the presence set (or at least does not have
// a problem with simultaneous mutation giving inconsistent results).
func (p presence) LoadPresenceCache() (current uint32) {
if p.P == nil {
return 0
}
return atomic.LoadUint32((*uint32)(p.P))
}
// PresentInCache reads presence from a cached word in the presence
// bitmap. It loads and caches a new word if the bit falls outside the
// cached word. This is for fast iteration through bitmaps in cases
// where we either know that the bitmap will not be altered, or we
// don't care about inconsistencies caused by simultaneous writes.
func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
if num/32 != *cachedElement {
o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
*current = atomic.LoadUint32(q)
*cachedElement = num / 32
}
return (*current & (1 << (num % 32))) > 0
}
// AnyPresent checks if any field is marked as present in the bitmap.
func (p presence) AnyPresent(size presenceSize) bool {
n := uintptr((size + 31) / 32)
for j := uintptr(0); j < n; j++ {
o := j * unsafe.Sizeof(uint32(0))
q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
b := atomic.LoadUint32(q)
if b > 0 {
return true
}
}
return false
}
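// exampleAnyPresent is an illustrative sketch (not part of the upstream
// file): AnyPresent above is the atomic equivalent of this scan over the
// ceil(size/32) words that back the bitmap.
func exampleAnyPresent(bitmap []uint32, size uint32) bool {
	for j := uint32(0); j < (size+31)/32 && int(j) < len(bitmap); j++ {
		if bitmap[j] != 0 {
			return true
		}
	}
	return false
}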
// toRaceDetectData finds the preceding RaceDetectHookData in a
// message by using pointer arithmetic. As the type of the presence
// set (bitmap) varies with the number of fields in the protobuf, we
// can not have a struct type containing the array and the
// RaceDetectHookData. Instead, the RaceDetectHookData is placed
// immediately before the bitmap array, and we find it by walking
// backwards in the struct.
//
// This method is only called from the race-detect version of the code,
// so RaceDetectHookData is never an empty struct.
func (p presence) toRaceDetectData() *RaceDetectHookData {
var template struct {
d RaceDetectHookData
a [1]uint32
}
o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
}
func atomicLoadShadowPresence(p **[]byte) *[]byte {
return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
}
// findPointerToRaceDetectData finds the preceding RaceDetectHookData
// in a message by using pointer arithmetic. For the methods called
// directly from generated code, we don't have a pointer to the
// beginning of the presence set, but a pointer inside the array. As
// we know the index of the bit we're manipulating (num), we can
// calculate which element of the array ptr is pointing to. With that
// information we find the preceding RaceDetectHookData and can
// manipulate the shadow bitmap.
//
// This method is only called from the race-detect version of the
// code, so RaceDetectHookData is never an empty struct.
func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
var template struct {
d RaceDetectHookData
a [1]uint32
}
o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
}

View File

@@ -37,6 +37,10 @@ const (
// ValidationValid indicates that unmarshaling the message will succeed.
ValidationValid
// ValidationWrongWireType indicates that a validated field does not have
// the expected wire type.
ValidationWrongWireType
)
func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if ft.Kind() == reflect.Ptr {
// Repeated opaque message fields are *[]*T.
ft = ft.Elem()
}
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
if ft.Kind() == reflect.Ptr {
// Repeated opaque message fields are *[]*T.
ft = ft.Elem()
}
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
@@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if !fd.IsWeak() {
vi.mi = getMessageInfo(ft)
}
vi.mi = getMessageInfo(ft)
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
@@ -304,26 +318,6 @@ State:
}
if f != nil {
vi = f.validation
if vi.typ == validationTypeMessage && vi.mi == nil {
// Probable weak field.
//
// TODO: Consider storing the results of this lookup somewhere
// rather than recomputing it on every validation.
fd := st.mi.Desc.Fields().ByNumber(num)
if fd == nil || !fd.IsWeak() {
break
}
messageName := fd.Message().FullName()
messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
switch err {
case nil:
vi.mi, _ = messageType.(*MessageInfo)
case protoregistry.NotFound:
vi.typ = validationTypeBytes
default:
return out, ValidationUnknown
}
}
break
}
// Possible extension field.

View File

@@ -1,74 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// weakFields adds methods to the exported WeakFields type for internal use.
//
// The exported type is an alias to an unnamed type, so methods can't be
// defined directly on it.
type weakFields WeakFields
func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
m, ok := w[int32(num)]
return m, ok
}
func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
if *w == nil {
*w = make(weakFields)
}
(*w)[int32(num)] = m
}
func (w *weakFields) clear(num protoreflect.FieldNumber) {
delete(*w, int32(num))
}
func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
_, ok := w[int32(num)]
return ok
}
func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
delete(*w, int32(num))
}
func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
if m, ok := w[int32(num)]; ok {
return m
}
mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
if mt == nil {
panic(fmt.Sprintf("message %v for weak field is not linked in", name))
}
return mt.Zero().Interface()
}
func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
if m != nil {
mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
if mt == nil {
panic(fmt.Sprintf("message %v for weak field is not linked in", name))
}
if mt != m.ProtoReflect().Type() {
panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
}
}
if m == nil || !m.ProtoReflect().IsValid() {
delete(*w, int32(num))
return
}
if *w == nil {
*w = make(weakFields)
}
(*w)[int32(num)] = m
}