Mirror of https://github.com/cloudflare/cloudflared.git (synced 2025-07-28 23:19:58 +00:00)

TUN-528: Move cloudflared into a separate repo

This commit is contained in:
955  vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go  (generated, vendored, normal file)
@@ -0,0 +1,955 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/monitoring/v3/alert.proto

package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import duration "github.com/golang/protobuf/ptypes/duration"
import wrappers "github.com/golang/protobuf/ptypes/wrappers"
import _ "google.golang.org/genproto/googleapis/api/annotations"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// Operators for combining conditions.
type AlertPolicy_ConditionCombinerType int32

const (
	// An unspecified combiner.
	AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0
	// Combine conditions using the logical `AND` operator. An
	// incident is created only if all conditions are met
	// simultaneously. This combiner is satisfied if all conditions are
	// met, even if they are met on completely different resources.
	AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1
	// Combine conditions using the logical `OR` operator. An incident
	// is created if any of the listed conditions is met.
	AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2
	// Combine conditions using logical `AND` operator, but unlike the regular
	// `AND` option, an incident is created only if all conditions are met
	// simultaneously on at least one resource.
	AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3
)

var AlertPolicy_ConditionCombinerType_name = map[int32]string{
	0: "COMBINE_UNSPECIFIED",
	1: "AND",
	2: "OR",
	3: "AND_WITH_MATCHING_RESOURCE",
}
var AlertPolicy_ConditionCombinerType_value = map[string]int32{
	"COMBINE_UNSPECIFIED": 0,
	"AND": 1,
	"OR": 2,
	"AND_WITH_MATCHING_RESOURCE": 3,
}

func (x AlertPolicy_ConditionCombinerType) String() string {
	return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x))
}
func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 0}
}

// A description of the conditions under which some aspect of your system is
// considered to be "unhealthy" and the ways to notify people or services about
// this state. For an overview of alert policies, see
// [Introduction to Alerting](/monitoring/alerts/).
type AlertPolicy struct {
	// Required if the policy exists. The resource name for this policy. The
	// syntax is:
	//
	//     projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
	//
	// `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy
	// is created. When calling the
	// [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
	// method, do not include the `name` field in the alerting policy passed as
	// part of the request.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// A short name or phrase used to identify the policy in dashboards,
	// notifications, and incidents. To avoid confusion, don't use the same
	// display name for multiple policies in the same project. The name is
	// limited to 512 Unicode characters.
	DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Documentation that is included with notifications and incidents related to
	// this policy. Best practice is for the documentation to include information
	// to help responders understand, mitigate, escalate, and correct the
	// underlying problems detected by the alerting policy. Notification channels
	// that have limited capacity might not show this documentation.
	Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"`
	// User-supplied key/value data to be used for organizing and
	// identifying the `AlertPolicy` objects.
	//
	// The field can contain up to 64 entries. Each key and value is limited to
	// 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
	// values can contain only lowercase letters, numerals, underscores, and
	// dashes. Keys must begin with a letter.
	UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// A list of conditions for the policy. The conditions are combined by AND or
	// OR according to the `combiner` field. If the combined conditions evaluate
	// to true, then an incident is created. A policy can have from one to six
	// conditions.
	Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"`
	// How to combine the results of multiple conditions
	// to determine if an incident should be opened.
	Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
	// Whether or not the policy is enabled. On write, the default interpretation
	// if unset is that the policy is enabled. On read, clients should not make
	// any assumption about the state if it has not been populated. The
	// field should always be populated on List and Get operations, unless
	// a field projection has been specified that strips it out.
	Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Identifies the notification channels to which notifications should be sent
	// when incidents are opened or closed or when new violations occur on
	// an already opened incident. Each element of this array corresponds to
	// the `name` field in each of the
	// [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
	// objects that are returned from the [`ListNotificationChannels`]
	// [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
	// method. The syntax of the entries in this field is:
	//
	//     projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
	NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
	// A read-only record of the creation of the alerting policy. If provided
	// in a call to create or update, this field will be ignored.
	CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
	// A read-only record of the most recent change to the alerting policy. If
	// provided in a call to create or update, this field will be ignored.
	MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy) Reset() { *m = AlertPolicy{} }
func (m *AlertPolicy) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy) ProtoMessage() {}
func (*AlertPolicy) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0}
}
func (m *AlertPolicy) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy.Unmarshal(m, b)
}
func (m *AlertPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy.Merge(dst, src)
}
func (m *AlertPolicy) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy.Size(m)
}
func (m *AlertPolicy) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy proto.InternalMessageInfo

func (m *AlertPolicy) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *AlertPolicy) GetDisplayName() string {
	if m != nil {
		return m.DisplayName
	}
	return ""
}

func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
	if m != nil {
		return m.Documentation
	}
	return nil
}

func (m *AlertPolicy) GetUserLabels() map[string]string {
	if m != nil {
		return m.UserLabels
	}
	return nil
}

func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
	if m != nil {
		return m.Conditions
	}
	return nil
}

func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
	if m != nil {
		return m.Combiner
	}
	return AlertPolicy_COMBINE_UNSPECIFIED
}

func (m *AlertPolicy) GetEnabled() *wrappers.BoolValue {
	if m != nil {
		return m.Enabled
	}
	return nil
}

func (m *AlertPolicy) GetNotificationChannels() []string {
	if m != nil {
		return m.NotificationChannels
	}
	return nil
}

func (m *AlertPolicy) GetCreationRecord() *MutationRecord {
	if m != nil {
		return m.CreationRecord
	}
	return nil
}

func (m *AlertPolicy) GetMutationRecord() *MutationRecord {
	if m != nil {
		return m.MutationRecord
	}
	return nil
}

// A content string and a MIME type that describes the content string's
// format.
type AlertPolicy_Documentation struct {
	// The text of the documentation, interpreted according to `mime_type`.
	// The content may not exceed 8,192 Unicode characters and may not exceed
	// more than 10,240 bytes when encoded in UTF-8 format, whichever is
	// smaller.
	Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
	// The format of the `content` field. Presently, only the value
	// `"text/markdown"` is supported. See
	// [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
	MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} }
func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy_Documentation) ProtoMessage() {}
func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 0}
}
func (m *AlertPolicy_Documentation) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy_Documentation.Unmarshal(m, b)
}
func (m *AlertPolicy_Documentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy_Documentation.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy_Documentation) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy_Documentation.Merge(dst, src)
}
func (m *AlertPolicy_Documentation) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy_Documentation.Size(m)
}
func (m *AlertPolicy_Documentation) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy_Documentation.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy_Documentation proto.InternalMessageInfo

func (m *AlertPolicy_Documentation) GetContent() string {
	if m != nil {
		return m.Content
	}
	return ""
}

func (m *AlertPolicy_Documentation) GetMimeType() string {
	if m != nil {
		return m.MimeType
	}
	return ""
}

// A condition is a true/false test that determines when an alerting policy
// should open an incident. If a condition evaluates to true, it signifies
// that something is wrong.
type AlertPolicy_Condition struct {
	// Required if the condition exists. The unique resource name for this
	// condition. Its syntax is:
	//
	//     projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
	//
	// `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the
	// condition is created as part of a new or updated alerting policy.
	//
	// When calling the
	// [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
	// method, do not include the `name` field in the conditions of the
	// requested alerting policy. Stackdriver Monitoring creates the
	// condition identifiers and includes them in the new policy.
	//
	// When calling the
	// [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy]
	// method to update a policy, including a condition `name` causes the
	// existing condition to be updated. Conditions without names are added to
	// the updated policy. Existing conditions are deleted if they are not
	// updated.
	//
	// Best practice is to preserve `[CONDITION_ID]` if you make only small
	// changes, such as those to condition thresholds, durations, or trigger
	// values. Otherwise, treat the change as a new condition and let the
	// existing condition be deleted.
	Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"`
	// A short name or phrase used to identify the condition in dashboards,
	// notifications, and incidents. To avoid confusion, don't use the same
	// display name for multiple conditions in the same policy.
	DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Only one of the following condition types will be specified.
	//
	// Types that are valid to be assigned to Condition:
	// *AlertPolicy_Condition_ConditionThreshold
	// *AlertPolicy_Condition_ConditionAbsent
	Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} }
func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy_Condition) ProtoMessage() {}
func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 1}
}
func (m *AlertPolicy_Condition) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy_Condition.Unmarshal(m, b)
}
func (m *AlertPolicy_Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy_Condition.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy_Condition) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy_Condition.Merge(dst, src)
}
func (m *AlertPolicy_Condition) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy_Condition.Size(m)
}
func (m *AlertPolicy_Condition) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy_Condition.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy_Condition proto.InternalMessageInfo

type isAlertPolicy_Condition_Condition interface {
	isAlertPolicy_Condition_Condition()
}

type AlertPolicy_Condition_ConditionThreshold struct {
	ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"`
}
type AlertPolicy_Condition_ConditionAbsent struct {
	ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"`
}

func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}

func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition {
	if m != nil {
		return m.Condition
	}
	return nil
}

func (m *AlertPolicy_Condition) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *AlertPolicy_Condition) GetDisplayName() string {
	if m != nil {
		return m.DisplayName
	}
	return ""
}

func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold {
	if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok {
		return x.ConditionThreshold
	}
	return nil
}

func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence {
	if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok {
		return x.ConditionAbsent
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{
		(*AlertPolicy_Condition_ConditionThreshold)(nil),
		(*AlertPolicy_Condition_ConditionAbsent)(nil),
	}
}

func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*AlertPolicy_Condition)
	// condition
	switch x := m.Condition.(type) {
	case *AlertPolicy_Condition_ConditionThreshold:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ConditionThreshold); err != nil {
			return err
		}
	case *AlertPolicy_Condition_ConditionAbsent:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ConditionAbsent); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x)
	}
	return nil
}

func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*AlertPolicy_Condition)
	switch tag {
	case 1: // condition.condition_threshold
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(AlertPolicy_Condition_MetricThreshold)
		err := b.DecodeMessage(msg)
		m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg}
		return true, err
	case 2: // condition.condition_absent
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(AlertPolicy_Condition_MetricAbsence)
		err := b.DecodeMessage(msg)
		m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg}
		return true, err
	default:
		return false, nil
	}
}

func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*AlertPolicy_Condition)
	// condition
	switch x := m.Condition.(type) {
	case *AlertPolicy_Condition_ConditionThreshold:
		s := proto.Size(x.ConditionThreshold)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *AlertPolicy_Condition_ConditionAbsent:
		s := proto.Size(x.ConditionAbsent)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}

// Specifies how many time series must fail a predicate to trigger a
// condition. If not specified, then a `{count: 1}` trigger is used.
type AlertPolicy_Condition_Trigger struct {
	// A type of trigger.
	//
	// Types that are valid to be assigned to Type:
	// *AlertPolicy_Condition_Trigger_Count
	// *AlertPolicy_Condition_Trigger_Percent
	Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} }
func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy_Condition_Trigger) ProtoMessage() {}
func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 1, 0}
}
func (m *AlertPolicy_Condition_Trigger) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy_Condition_Trigger.Unmarshal(m, b)
}
func (m *AlertPolicy_Condition_Trigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy_Condition_Trigger.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy_Condition_Trigger) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy_Condition_Trigger.Merge(dst, src)
}
func (m *AlertPolicy_Condition_Trigger) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy_Condition_Trigger.Size(m)
}
func (m *AlertPolicy_Condition_Trigger) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy_Condition_Trigger.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy_Condition_Trigger proto.InternalMessageInfo

type isAlertPolicy_Condition_Trigger_Type interface {
	isAlertPolicy_Condition_Trigger_Type()
}

type AlertPolicy_Condition_Trigger_Count struct {
	Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"`
}
type AlertPolicy_Condition_Trigger_Percent struct {
	Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"`
}

func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {}
func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {}

func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type {
	if m != nil {
		return m.Type
	}
	return nil
}

func (m *AlertPolicy_Condition_Trigger) GetCount() int32 {
	if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok {
		return x.Count
	}
	return 0
}

func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 {
	if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok {
		return x.Percent
	}
	return 0
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{
		(*AlertPolicy_Condition_Trigger_Count)(nil),
		(*AlertPolicy_Condition_Trigger_Percent)(nil),
	}
}

func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*AlertPolicy_Condition_Trigger)
	// type
	switch x := m.Type.(type) {
	case *AlertPolicy_Condition_Trigger_Count:
		b.EncodeVarint(1<<3 | proto.WireVarint)
		b.EncodeVarint(uint64(x.Count))
	case *AlertPolicy_Condition_Trigger_Percent:
		b.EncodeVarint(2<<3 | proto.WireFixed64)
		b.EncodeFixed64(math.Float64bits(x.Percent))
	case nil:
	default:
		return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x)
	}
	return nil
}

func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*AlertPolicy_Condition_Trigger)
	switch tag {
	case 1: // type.count
		if wire != proto.WireVarint {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeVarint()
		m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)}
		return true, err
	case 2: // type.percent
		if wire != proto.WireFixed64 {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeFixed64()
		m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)}
		return true, err
	default:
		return false, nil
	}
}

func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*AlertPolicy_Condition_Trigger)
	// type
	switch x := m.Type.(type) {
	case *AlertPolicy_Condition_Trigger_Count:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(x.Count))
	case *AlertPolicy_Condition_Trigger_Percent:
		n += 1 // tag and wire
		n += 8
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}

// A condition type that compares a collection of time series
// against a threshold.
type AlertPolicy_Condition_MetricThreshold struct {
	// A [filter](/monitoring/api/v3/filters) that
	// identifies which time series should be compared with the threshold.
	//
	// The filter is similar to the one that is specified in the
	// [`MetricService.ListTimeSeries`
	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
	// call is useful to verify the time series that will be retrieved /
	// processed) and must specify the metric type and optionally may contain
	// restrictions on resource type, resource labels, and metric labels.
	// This field may not exceed 2048 Unicode characters in length.
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Specifies the alignment of data points in individual time series as
	// well as how to combine the retrieved time series together (such as
	// when aggregating multiple streams on each resource to a single
	// stream for each resource or when aggregating streams across all
	// members of a group of resources). Multiple aggregations
	// are applied in the order specified.
	//
	// This field is similar to the one in the
	// [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
	// It is advisable to use the `ListTimeSeries` method when debugging this field.
	Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
	// A [filter](/monitoring/api/v3/filters) that identifies a time
	// series that should be used as the denominator of a ratio that will be
	// compared with the threshold. If a `denominator_filter` is specified,
	// the time series specified by the `filter` field will be used as the
	// numerator.
	//
	// The filter is similar to the one that is specified in the
	// [`MetricService.ListTimeSeries`
	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
	// call is useful to verify the time series that will be retrieved /
	// processed) and must specify the metric type and optionally may contain
	// restrictions on resource type, resource labels, and metric labels.
	// This field may not exceed 2048 Unicode characters in length.
	DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"`
	// Specifies the alignment of data points in individual time series
	// selected by `denominatorFilter` as
	// well as how to combine the retrieved time series together (such as
	// when aggregating multiple streams on each resource to a single
	// stream for each resource or when aggregating streams across all
	// members of a group of resources).
	//
	// When computing ratios, the `aggregations` and
	// `denominator_aggregations` fields must use the same alignment period
	// and produce time series that have the same periodicity and labels.
	//
	// This field is similar to the one in the
	// [`MetricService.ListTimeSeries`
	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It
	// is advisable to use the `ListTimeSeries` method when debugging this
	// field.
	DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"`
	// The comparison to apply between the time series (indicated by `filter`
	// and `aggregation`) and the threshold (indicated by `threshold_value`).
	// The comparison is applied on each time series, with the time series
	// on the left-hand side and the threshold on the right-hand side.
	//
	// Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently.
	Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
	// A value against which to compare the time series.
	ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"`
	// The amount of time that a time series must violate the
	// threshold to be considered failing. Currently, only values
	// that are a multiple of a minute--e.g. 60, 120, or 300
	// seconds--are supported. If an invalid value is given, an
	// error will be returned. The `Duration.nanos` field is
	// ignored. When choosing a duration, it is useful to keep in mind the
	// frequency of the underlying time series data (which may also be
	// affected by any alignments specified in the `aggregation` field);
	// a good duration is long enough so that a single outlier does not
	// generate spurious alerts, but short enough that unhealthy states
	// are detected and alerted on quickly.
	Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
	// The number/percent of time series for which the comparison must hold
	// in order for the condition to trigger. If unspecified, then the
	// condition will trigger if the comparison is true for any of the
	// time series that have been identified by `filter` and `aggregations`,
	// or by the ratio, if `denominator_filter` and `denominator_aggregations`
	// are specified.
	Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} }
func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {}
func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 1, 1}
}
func (m *AlertPolicy_Condition_MetricThreshold) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Unmarshal(m, b)
}
func (m *AlertPolicy_Condition_MetricThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy_Condition_MetricThreshold) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Merge(dst, src)
}
func (m *AlertPolicy_Condition_MetricThreshold) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Size(m)
}
func (m *AlertPolicy_Condition_MetricThreshold) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy_Condition_MetricThreshold proto.InternalMessageInfo

func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string {
	if m != nil {
		return m.Filter
	}
	return ""
}

func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation {
	if m != nil {
		return m.Aggregations
	}
	return nil
}

func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string {
	if m != nil {
		return m.DenominatorFilter
	}
	return ""
}

func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation {
	if m != nil {
		return m.DenominatorAggregations
	}
	return nil
}

func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType {
	if m != nil {
		return m.Comparison
	}
	return ComparisonType_COMPARISON_UNSPECIFIED
}

func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 {
	if m != nil {
		return m.ThresholdValue
	}
	return 0
}

func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration {
	if m != nil {
		return m.Duration
	}
	return nil
}

func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger {
	if m != nil {
		return m.Trigger
	}
	return nil
}

// A condition type that checks that monitored resources
// are reporting data. The configuration defines a metric and
// a set of monitored resources. The predicate is considered in violation
// when a time series for the specified metric of a monitored
// resource does not include any data in the specified `duration`.
type AlertPolicy_Condition_MetricAbsence struct {
	// A [filter](/monitoring/api/v3/filters) that
	// identifies which time series should be compared with the threshold.
	//
	// The filter is similar to the one that is specified in the
	// [`MetricService.ListTimeSeries`
	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
	// call is useful to verify the time series that will be retrieved /
	// processed) and must specify the metric type and optionally may contain
	// restrictions on resource type, resource labels, and metric labels.
	// This field may not exceed 2048 Unicode characters in length.
	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
	// Specifies the alignment of data points in individual time series as
	// well as how to combine the retrieved time series together (such as
	// when aggregating multiple streams on each resource to a single
	// stream for each resource or when aggregating streams across all
	// members of a group of resources). Multiple aggregations
	// are applied in the order specified.
	//
	// This field is similar to the
	// one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
	// It is advisable to use the `ListTimeSeries` method when debugging this field.
	Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
	// The amount of time that a time series must fail to report new
	// data to be considered failing. Currently, only values that
	// are a multiple of a minute--e.g. 60, 120, or 300
	// seconds--are supported. If an invalid value is given, an
	// error will be returned. The `Duration.nanos` field is
	// ignored.
	Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
	// The number/percent of time series for which the comparison must hold
	// in order for the condition to trigger. If unspecified, then the
	// condition will trigger if the comparison is true for any of the
	// time series that have been identified by `filter` and `aggregations`.
	Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} }
func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) }
func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {}
func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_17949719c044e638, []int{0, 1, 2}
}
func (m *AlertPolicy_Condition_MetricAbsence) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Unmarshal(m, b)
}
func (m *AlertPolicy_Condition_MetricAbsence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Marshal(b, m, deterministic)
}
func (dst *AlertPolicy_Condition_MetricAbsence) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Merge(dst, src)
}
func (m *AlertPolicy_Condition_MetricAbsence) XXX_Size() int {
	return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Size(m)
}
func (m *AlertPolicy_Condition_MetricAbsence) XXX_DiscardUnknown() {
	xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.DiscardUnknown(m)
}

var xxx_messageInfo_AlertPolicy_Condition_MetricAbsence proto.InternalMessageInfo

func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string {
	if m != nil {
		return m.Filter
	}
	return ""
}

func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation {
	if m != nil {
		return m.Aggregations
	}
	return nil
}

func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration {
	if m != nil {
		return m.Duration
	}
	return nil
}

func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger {
	if m != nil {
		return m.Trigger
	}
	return nil
}

func init() {
	proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy")
	proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.AlertPolicy.UserLabelsEntry")
	proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation")
	proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition")
	proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger")
	proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold")
	proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence")
	proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value)
}

func init() {
	proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor_alert_17949719c044e638)
}

var fileDescriptor_alert_17949719c044e638 = []byte{
|
||||
// 941 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44,
|
||||
0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90,
|
||||
0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69,
|
||||
0x91, 0x50, 0x25, 0xcb, 0x71, 0xa6, 0xae, 0x85, 0x3d, 0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f,
|
||||
0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb,
|
||||
0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d,
|
||||
0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 0x3c, 0xd0,
|
||||
0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67,
|
||||
0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac,
|
||||
0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6,
|
||||
0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe,
|
||||
0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c,
|
||||
0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf,
|
||||
0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67,
|
||||
0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69,
|
||||
0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08,
|
||||
0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed,
|
||||
0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3,
|
||||
0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7,
|
||||
0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62,
|
||||
0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08,
|
||||
0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6,
|
||||
0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70,
|
||||
0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0,
|
||||
0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f,
|
||||
0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93,
|
||||
0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64,
|
||||
0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58,
|
||||
0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12,
|
||||
0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9,
|
||||
0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7,
|
||||
0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c,
|
||||
0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1,
|
||||
0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a,
|
||||
0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54,
|
||||
0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee,
|
||||
0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60,
|
||||
0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4,
|
||||
0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a,
|
||||
0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 0x6a, 0x16, 0xbe, 0x51, 0x01, 0xfc, 0xdf,
|
||||
0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c,
|
||||
0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02,
|
||||
0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b,
|
||||
0x8f, 0xbe, 0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24,
|
||||
0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92,
|
||||
0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca,
|
||||
0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50,
|
||||
0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c,
|
||||
0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca,
|
||||
0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43,
|
||||
0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf,
|
||||
0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0,
|
||||
0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71,
|
||||
0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20,
|
||||
0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8,
|
||||
0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae,
|
||||
0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a,
|
||||
0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9,
|
||||
0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00,
|
||||
}
667  vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go  (generated, vendored, normal file)
@@ -0,0 +1,667 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/monitoring/v3/alert_service.proto

package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import field_mask "google.golang.org/genproto/protobuf/field_mask"

import (
	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// The protocol for the `CreateAlertPolicy` request.
type CreateAlertPolicyRequest struct {
	// The project in which to create the alerting policy. The format is
	// `projects/[PROJECT_ID]`.
	//
	// Note that this field names the parent container in which the alerting
	// policy will be written, not the name of the created policy. The alerting
	// policy that is returned will have a name that contains a normalized
	// representation of this name as a prefix but adds a suffix of the form
	// `/alertPolicies/[POLICY_ID]`, identifying the policy in the container.
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	// The requested alerting policy. You should omit the `name` field in this
	// policy. The name will be returned in the new policy, including
	// a new [ALERT_POLICY_ID] value.
	AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} }
func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
func (*CreateAlertPolicyRequest) ProtoMessage() {}
func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_service_2f734ae33022c87f, []int{0}
}
func (m *CreateAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateAlertPolicyRequest.Unmarshal(m, b)
}
func (m *CreateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateAlertPolicyRequest.Marshal(b, m, deterministic)
}
func (dst *CreateAlertPolicyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateAlertPolicyRequest.Merge(dst, src)
}
func (m *CreateAlertPolicyRequest) XXX_Size() int {
	return xxx_messageInfo_CreateAlertPolicyRequest.Size(m)
}
func (m *CreateAlertPolicyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateAlertPolicyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateAlertPolicyRequest proto.InternalMessageInfo

func (m *CreateAlertPolicyRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
	if m != nil {
		return m.AlertPolicy
	}
	return nil
}

// The protocol for the `GetAlertPolicy` request.
type GetAlertPolicyRequest struct {
	// The alerting policy to retrieve. The format is
	//
	//     projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} }
func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
func (*GetAlertPolicyRequest) ProtoMessage() {}
func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_alert_service_2f734ae33022c87f, []int{1}
}
func (m *GetAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetAlertPolicyRequest.Unmarshal(m, b)
}
func (m *GetAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetAlertPolicyRequest.Marshal(b, m, deterministic)
}
func (dst *GetAlertPolicyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetAlertPolicyRequest.Merge(dst, src)
}
func (m *GetAlertPolicyRequest) XXX_Size() int {
	return xxx_messageInfo_GetAlertPolicyRequest.Size(m)
}
func (m *GetAlertPolicyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetAlertPolicyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetAlertPolicyRequest proto.InternalMessageInfo

func (m *GetAlertPolicyRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// The protocol for the `ListAlertPolicies` request.
type ListAlertPoliciesRequest struct {
	// The project whose alert policies are to be listed. The format is
	//
	//     projects/[PROJECT_ID]
	//
	// Note that this field names the parent container in which the alerting
	// policies to be listed are stored. To retrieve a single alerting policy
	// by name, use the
	// [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
	// operation, instead.
	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
	// If provided, this field specifies the criteria that must be met by
	// alert policies to be included in the response.
	//
	// For more details, see [sorting and
	// filtering](/monitoring/api/v3/sorting-and-filtering).
	Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
	// A comma-separated list of fields by which to sort the result. Supports
	// the same set of field references as the `filter` field. Entries can be
	// prefixed with a minus sign to sort by the field in descending order.
	//
	// For more details, see [sorting and
	// filtering](/monitoring/api/v3/sorting-and-filtering).
	OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
	// The maximum number of results to return in a single response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If this field is not empty then it must contain the `nextPageToken` value
	// returned by a previous call to this method. Using this field causes the
	// method to return more results from the previous method call.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} }
|
||||
func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListAlertPoliciesRequest) ProtoMessage() {}
|
||||
func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_alert_service_2f734ae33022c87f, []int{2}
|
||||
}
|
||||
func (m *ListAlertPoliciesRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListAlertPoliciesRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListAlertPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListAlertPoliciesRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListAlertPoliciesRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListAlertPoliciesRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *ListAlertPoliciesRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListAlertPoliciesRequest.Size(m)
|
||||
}
|
||||
func (m *ListAlertPoliciesRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListAlertPoliciesRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListAlertPoliciesRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListAlertPoliciesRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesRequest) GetFilter() string {
|
||||
if m != nil {
|
||||
return m.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesRequest) GetOrderBy() string {
|
||||
if m != nil {
|
||||
return m.OrderBy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesRequest) GetPageSize() int32 {
|
||||
if m != nil {
|
||||
return m.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesRequest) GetPageToken() string {
|
||||
if m != nil {
|
||||
return m.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListAlertPolicies` response.
|
||||
type ListAlertPoliciesResponse struct {
|
||||
// The returned alert policies.
|
||||
AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"`
|
||||
// If there might be more results than were returned, then this field is set
|
||||
// to a non-empty value. To see the additional results,
|
||||
// use that value as `pageToken` in the next call to this method.
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} }
|
||||
func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListAlertPoliciesResponse) ProtoMessage() {}
|
||||
func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_alert_service_2f734ae33022c87f, []int{3}
|
||||
}
|
||||
func (m *ListAlertPoliciesResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListAlertPoliciesResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListAlertPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListAlertPoliciesResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListAlertPoliciesResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListAlertPoliciesResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *ListAlertPoliciesResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListAlertPoliciesResponse.Size(m)
|
||||
}
|
||||
func (m *ListAlertPoliciesResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListAlertPoliciesResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListAlertPoliciesResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
|
||||
if m != nil {
|
||||
return m.AlertPolicies
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListAlertPoliciesResponse) GetNextPageToken() string {
|
||||
if m != nil {
|
||||
return m.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `UpdateAlertPolicy` request.
|
||||
type UpdateAlertPolicyRequest struct {
|
||||
// Optional. A list of alerting policy field names. If this field is not
|
||||
// empty, each listed field in the existing alerting policy is set to the
|
||||
// value of the corresponding field in the supplied policy (`alert_policy`),
|
||||
// or to the field's default value if the field is not in the supplied
|
||||
// alerting policy. Fields not listed retain their previous value.
|
||||
//
|
||||
// Examples of valid field masks include `display_name`, `documentation`,
|
||||
// `documentation.content`, `documentation.mime_type`, `user_labels`,
|
||||
// `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
|
||||
//
|
||||
// If this field is empty, then the supplied alerting policy replaces the
|
||||
// existing policy. It is the same as deleting the existing policy and
|
||||
// adding the supplied policy, except for the following:
|
||||
//
|
||||
// + The new policy will have the same `[ALERT_POLICY_ID]` as the former
|
||||
// policy. This gives you continuity with the former policy in your
|
||||
// notifications and incidents.
|
||||
// + Conditions in the new policy will keep their former `[CONDITION_ID]` if
|
||||
// the supplied condition includes the `name` field with that
|
||||
// `[CONDITION_ID]`. If the supplied condition omits the `name` field,
|
||||
// then a new `[CONDITION_ID]` is created.
|
||||
UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
|
||||
// Required. The updated alerting policy or the updated values for the
|
||||
// fields listed in `update_mask`.
|
||||
// If `update_mask` is not empty, any fields in this policy that are
|
||||
// not in `update_mask` are ignored.
|
||||
AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} }
|
||||
func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*UpdateAlertPolicyRequest) ProtoMessage() {}
|
||||
func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_alert_service_2f734ae33022c87f, []int{4}
|
||||
}
|
||||
func (m *UpdateAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UpdateAlertPolicyRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UpdateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UpdateAlertPolicyRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UpdateAlertPolicyRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UpdateAlertPolicyRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *UpdateAlertPolicyRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_UpdateAlertPolicyRequest.Size(m)
|
||||
}
|
||||
func (m *UpdateAlertPolicyRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UpdateAlertPolicyRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UpdateAlertPolicyRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask {
|
||||
if m != nil {
|
||||
return m.UpdateMask
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
|
||||
if m != nil {
|
||||
return m.AlertPolicy
|
||||
}
|
||||
return nil
|
||||
}
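
// exampleUpdateDisplayName is an illustrative sketch, not part of the
// generated bindings: it builds a partial update that touches only the
// `display_name` path mentioned in the UpdateAlertPolicyRequest comment
// above, leaving every other field of the stored policy unchanged. The
// client and policy values are assumed to be supplied by the caller.
func exampleUpdateDisplayName(ctx context.Context, client AlertPolicyServiceClient, policy *AlertPolicy) (*AlertPolicy, error) {
	req := &UpdateAlertPolicyRequest{
		// Only the paths listed in the mask are copied from `policy`
		// into the existing alerting policy.
		UpdateMask:  &field_mask.FieldMask{Paths: []string{"display_name"}},
		AlertPolicy: policy,
	}
	return client.UpdateAlertPolicy(ctx, req)
}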
|
||||
|
||||
// The protocol for the `DeleteAlertPolicy` request.
|
||||
type DeleteAlertPolicyRequest struct {
|
||||
// The alerting policy to delete. The format is:
|
||||
//
|
||||
// projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
|
||||
//
|
||||
// For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} }
|
||||
func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteAlertPolicyRequest) ProtoMessage() {}
|
||||
func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_alert_service_2f734ae33022c87f, []int{5}
|
||||
}
|
||||
func (m *DeleteAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_DeleteAlertPolicyRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *DeleteAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_DeleteAlertPolicyRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *DeleteAlertPolicyRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DeleteAlertPolicyRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *DeleteAlertPolicyRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_DeleteAlertPolicyRequest.Size(m)
|
||||
}
|
||||
func (m *DeleteAlertPolicyRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DeleteAlertPolicyRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DeleteAlertPolicyRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *DeleteAlertPolicyRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest")
|
||||
proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest")
|
||||
proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest")
|
||||
proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse")
|
||||
proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest")
|
||||
proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest")
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4

// AlertPolicyServiceClient is the client API for AlertPolicyService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AlertPolicyServiceClient interface {
// Lists the existing alerting policies for the project.
ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
// Gets a single alerting policy.
GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
// Creates a new alerting policy.
CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
// Deletes an alerting policy.
DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Updates an alerting policy. You can either replace the entire policy with
// a new one or replace only certain fields in the current alerting policy by
// specifying the fields to be updated via `updateMask`. Returns the
// updated alerting policy.
UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
}

type alertPolicyServiceClient struct {
cc *grpc.ClientConn
}

func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient {
return &alertPolicyServiceClient{cc}
}
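
// listAlertPoliciesExample is an illustrative sketch, not part of the
// generated bindings: it shows one plausible way to call the client above.
// The parent resource name "projects/my-project" is a placeholder, and the
// caller is assumed to have already dialed a gRPC connection.
func listAlertPoliciesExample(ctx context.Context, conn *grpc.ClientConn) ([]*AlertPolicy, error) {
	client := NewAlertPolicyServiceClient(conn)
	resp, err := client.ListAlertPolicies(ctx, &ListAlertPoliciesRequest{
		Name:     "projects/my-project", // placeholder project
		PageSize: 10,
	})
	if err != nil {
		return nil, err
	}
	// A real caller would keep paging with resp.NextPageToken until it is empty.
	return resp.AlertPolicies, nil
}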
|
||||
func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
|
||||
out := new(ListAlertPoliciesResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
|
||||
out := new(AlertPolicy)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// AlertPolicyServiceServer is the server API for AlertPolicyService service.
|
||||
type AlertPolicyServiceServer interface {
|
||||
// Lists the existing alerting policies for the project.
|
||||
ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
|
||||
// Gets a single alerting policy.
|
||||
GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
|
||||
// Creates a new alerting policy.
|
||||
CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
|
||||
// Deletes an alerting policy.
|
||||
DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error)
|
||||
// Updates an alerting policy. You can either replace the entire policy with
|
||||
// a new one or replace only certain fields in the current alerting policy by
|
||||
// specifying the fields to be updated via `updateMask`. Returns the
|
||||
// updated alerting policy.
|
||||
UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
|
||||
}
|
||||
|
||||
func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
|
||||
s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListAlertPoliciesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateAlertPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.monitoring.v3.AlertPolicyService",
|
||||
HandlerType: (*AlertPolicyServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ListAlertPolicies",
|
||||
Handler: _AlertPolicyService_ListAlertPolicies_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetAlertPolicy",
|
||||
Handler: _AlertPolicyService_GetAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateAlertPolicy",
|
||||
Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteAlertPolicy",
|
||||
Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateAlertPolicy",
|
||||
Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/monitoring/v3/alert_service.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor_alert_service_2f734ae33022c87f)
|
||||
}
|
||||
|
||||
var fileDescriptor_alert_service_2f734ae33022c87f = []byte{
|
||||
// 656 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c,
|
||||
0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a,
|
||||
0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e,
|
||||
0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23,
|
||||
0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79,
|
||||
0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68,
|
||||
0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49,
|
||||
0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48,
|
||||
0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08,
|
||||
0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c,
|
||||
0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e,
|
||||
0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37,
|
||||
0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e,
|
||||
0x6e, 0xc9, 0xdf, 0xf8, 0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57,
|
||||
0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21,
|
||||
0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c,
|
||||
0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40,
|
||||
0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f,
|
||||
0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09,
|
||||
0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07,
|
||||
0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c,
|
||||
0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0,
|
||||
0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2,
|
||||
0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32,
|
||||
0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0,
|
||||
0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e,
|
||||
0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe,
|
||||
0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d,
|
||||
0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33,
|
||||
0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a,
|
||||
0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa,
|
||||
0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2,
|
||||
0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d,
|
||||
0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a,
|
||||
0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e,
|
||||
0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96,
|
||||
0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6,
|
||||
0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4,
|
||||
0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3,
|
||||
0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15,
|
||||
0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00,
|
||||
}
|
880
vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
generated
vendored
Normal file
@@ -0,0 +1,880 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/common.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import duration "github.com/golang/protobuf/ptypes/duration"
|
||||
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import distribution "google.golang.org/genproto/googleapis/api/distribution"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Specifies an ordering relationship on two arguments, here called left and
|
||||
// right.
|
||||
type ComparisonType int32
|
||||
|
||||
const (
|
||||
// No ordering relationship is specified.
|
||||
ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0
|
||||
// The left argument is greater than the right argument.
|
||||
ComparisonType_COMPARISON_GT ComparisonType = 1
|
||||
// The left argument is greater than or equal to the right argument.
|
||||
ComparisonType_COMPARISON_GE ComparisonType = 2
|
||||
// The left argument is less than the right argument.
|
||||
ComparisonType_COMPARISON_LT ComparisonType = 3
|
||||
// The left argument is less than or equal to the right argument.
|
||||
ComparisonType_COMPARISON_LE ComparisonType = 4
|
||||
// The left argument is equal to the right argument.
|
||||
ComparisonType_COMPARISON_EQ ComparisonType = 5
|
||||
// The left argument is not equal to the right argument.
|
||||
ComparisonType_COMPARISON_NE ComparisonType = 6
|
||||
)
|
||||
|
||||
var ComparisonType_name = map[int32]string{
|
||||
0: "COMPARISON_UNSPECIFIED",
|
||||
1: "COMPARISON_GT",
|
||||
2: "COMPARISON_GE",
|
||||
3: "COMPARISON_LT",
|
||||
4: "COMPARISON_LE",
|
||||
5: "COMPARISON_EQ",
|
||||
6: "COMPARISON_NE",
|
||||
}
|
||||
var ComparisonType_value = map[string]int32{
|
||||
"COMPARISON_UNSPECIFIED": 0,
|
||||
"COMPARISON_GT": 1,
|
||||
"COMPARISON_GE": 2,
|
||||
"COMPARISON_LT": 3,
|
||||
"COMPARISON_LE": 4,
|
||||
"COMPARISON_EQ": 5,
|
||||
"COMPARISON_NE": 6,
|
||||
}
|
||||
|
||||
func (x ComparisonType) String() string {
|
||||
return proto.EnumName(ComparisonType_name, int32(x))
|
||||
}
|
||||
func (ComparisonType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{0}
|
||||
}
|
||||
|
||||
// The tier of service for a Stackdriver account. Please see the
|
||||
// [service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers)
|
||||
// for more details.
|
||||
type ServiceTier int32
|
||||
|
||||
const (
|
||||
// An invalid sentinel value, used to indicate that a tier has not
|
||||
// been provided explicitly.
|
||||
ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
|
||||
// The Stackdriver Basic tier, a free tier of service that provides basic
|
||||
// features, a moderate allotment of logs, and access to built-in metrics.
|
||||
// A number of features are not available in this tier. For more details,
|
||||
// see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
|
||||
ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
|
||||
// The Stackdriver Premium tier, a higher, more expensive tier of service
|
||||
// that provides access to all Stackdriver features, lets you use Stackdriver
|
||||
// with AWS accounts, and has a larger allotments for logs and metrics. For
|
||||
// more details, see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
|
||||
ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
|
||||
)
|
||||
|
||||
var ServiceTier_name = map[int32]string{
|
||||
0: "SERVICE_TIER_UNSPECIFIED",
|
||||
1: "SERVICE_TIER_BASIC",
|
||||
2: "SERVICE_TIER_PREMIUM",
|
||||
}
|
||||
var ServiceTier_value = map[string]int32{
|
||||
"SERVICE_TIER_UNSPECIFIED": 0,
|
||||
"SERVICE_TIER_BASIC": 1,
|
||||
"SERVICE_TIER_PREMIUM": 2,
|
||||
}
|
||||
|
||||
func (x ServiceTier) String() string {
|
||||
return proto.EnumName(ServiceTier_name, int32(x))
|
||||
}
|
||||
func (ServiceTier) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{1}
|
||||
}
|
||||
|
||||
// The Aligner describes how to bring the data points in a single
|
||||
// time series into temporal alignment.
|
||||
type Aggregation_Aligner int32
|
||||
|
||||
const (
|
||||
// No alignment. Raw data is returned. Not valid if cross-time
|
||||
// series reduction is requested. The value type of the result is
|
||||
// the same as the value type of the input.
|
||||
Aggregation_ALIGN_NONE Aggregation_Aligner = 0
|
||||
// Align and convert to delta metric type. This alignment is valid
|
||||
// for cumulative metrics and delta metrics. Aligning an existing
|
||||
// delta metric to a delta metric requires that the alignment
|
||||
// period be increased. The value type of the result is the same
|
||||
// as the value type of the input.
|
||||
//
|
||||
// One can think of this aligner as a rate but without time units; that
|
||||
// is, the output is conceptually (second_point - first_point).
|
||||
Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
|
||||
// Align and convert to a rate. This alignment is valid for
|
||||
// cumulative metrics and delta metrics with numeric values. The output is a
|
||||
// gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
//
|
||||
// One can think of this aligner as conceptually providing the slope of
|
||||
// the line that passes through the value at the start and end of the
|
||||
// window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)),
|
||||
// and the output unit is one that has a "/time" dimension.
|
||||
//
|
||||
// If, by rate, you are looking for percentage change, see the
|
||||
// `ALIGN_PERCENT_CHANGE` aligner option.
|
||||
Aggregation_ALIGN_RATE Aggregation_Aligner = 2
|
||||
// Align by interpolating between adjacent points around the
|
||||
// period boundary. This alignment is valid for gauge
|
||||
// metrics with numeric values. The value type of the result is the same
|
||||
// as the value type of the input.
|
||||
Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3
|
||||
// Align by shifting the oldest data point before the period
|
||||
// boundary to the boundary. This alignment is valid for gauge
|
||||
// metrics. The value type of the result is the same as the
|
||||
// value type of the input.
|
||||
Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the minimum of all data points in the
|
||||
// period. This alignment is valid for gauge and delta metrics with numeric
|
||||
// values. The value type of the result is the same as the value
|
||||
// type of the input.
|
||||
Aggregation_ALIGN_MIN Aggregation_Aligner = 10
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the maximum of all data points in the
|
||||
// period. This alignment is valid for gauge and delta metrics with numeric
|
||||
// values. The value type of the result is the same as the value
|
||||
// type of the input.
|
||||
Aggregation_ALIGN_MAX Aggregation_Aligner = 11
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the average or arithmetic mean of all
|
||||
// data points in the period. This alignment is valid for gauge and delta
|
||||
// metrics with numeric values. The value type of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_MEAN Aggregation_Aligner = 12
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the count of all data points in the
|
||||
// period. This alignment is valid for gauge and delta metrics with numeric
|
||||
// or Boolean values. The value type of the output is
|
||||
// [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_ALIGN_COUNT Aggregation_Aligner = 13
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the sum of all data points in the
|
||||
// period. This alignment is valid for gauge and delta metrics with numeric
|
||||
// and distribution values. The value type of the output is the
|
||||
// same as the value type of the input.
|
||||
Aggregation_ALIGN_SUM Aggregation_Aligner = 14
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the standard deviation of all data
|
||||
// points in the period. This alignment is valid for gauge and delta metrics
|
||||
// with numeric values. The value type of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the count of True-valued data points in the
|
||||
// period. This alignment is valid for gauge metrics with
|
||||
// Boolean values. The value type of the output is
|
||||
// [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the count of False-valued data points in the
|
||||
// period. This alignment is valid for gauge metrics with
|
||||
// Boolean values. The value type of the output is
|
||||
// [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the fraction of True-valued data points in the
|
||||
// period. This alignment is valid for gauge metrics with Boolean values.
|
||||
// The output value is in the range [0, 1] and has value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the 99th percentile of all data
|
||||
// points in the period. This alignment is valid for gauge and delta metrics
|
||||
// with distribution values. The output is a gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the 95th percentile of all data
|
||||
// points in the period. This alignment is valid for gauge and delta metrics
|
||||
// with distribution values. The output is a gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the 50th percentile of all data
|
||||
// points in the period. This alignment is valid for gauge and delta metrics
|
||||
// with distribution values. The output is a gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20
|
||||
// Align time series via aggregation. The resulting data point in
|
||||
// the alignment period is the 5th percentile of all data
|
||||
// points in the period. This alignment is valid for gauge and delta metrics
|
||||
// with distribution values. The output is a gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
|
||||
// Align and convert to a percentage change. This alignment is valid for
|
||||
// gauge and delta metrics with numeric values. This alignment conceptually
|
||||
// computes the equivalent of "((current - previous)/previous)*100"
|
||||
// where the previous value is determined based on the alignmentPeriod.
|
||||
// In the event that previous is 0 the calculated value is infinity with the
|
||||
// exception that if both (current - previous) and previous are 0 the
|
||||
// calculated value is 0.
|
||||
// A 10 minute moving mean is computed at each point of the time window
|
||||
// prior to the above calculation to smooth the metric and prevent false
|
||||
// positives from very short lived spikes.
|
||||
// Only applicable for data that is >= 0. Any values < 0 are treated as
|
||||
// no data. While delta metrics are accepted by this alignment, special care
|
||||
// should be taken that the values for the metric will always be positive.
|
||||
// The output is a gauge metric with value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23
|
||||
)
|
||||
|
||||
var Aggregation_Aligner_name = map[int32]string{
|
||||
0: "ALIGN_NONE",
|
||||
1: "ALIGN_DELTA",
|
||||
2: "ALIGN_RATE",
|
||||
3: "ALIGN_INTERPOLATE",
|
||||
4: "ALIGN_NEXT_OLDER",
|
||||
10: "ALIGN_MIN",
|
||||
11: "ALIGN_MAX",
|
||||
12: "ALIGN_MEAN",
|
||||
13: "ALIGN_COUNT",
|
||||
14: "ALIGN_SUM",
|
||||
15: "ALIGN_STDDEV",
|
||||
16: "ALIGN_COUNT_TRUE",
|
||||
24: "ALIGN_COUNT_FALSE",
|
||||
17: "ALIGN_FRACTION_TRUE",
|
||||
18: "ALIGN_PERCENTILE_99",
|
||||
19: "ALIGN_PERCENTILE_95",
|
||||
20: "ALIGN_PERCENTILE_50",
|
||||
21: "ALIGN_PERCENTILE_05",
|
||||
23: "ALIGN_PERCENT_CHANGE",
|
||||
}
|
||||
var Aggregation_Aligner_value = map[string]int32{
|
||||
"ALIGN_NONE": 0,
|
||||
"ALIGN_DELTA": 1,
|
||||
"ALIGN_RATE": 2,
|
||||
"ALIGN_INTERPOLATE": 3,
|
||||
"ALIGN_NEXT_OLDER": 4,
|
||||
"ALIGN_MIN": 10,
|
||||
"ALIGN_MAX": 11,
|
||||
"ALIGN_MEAN": 12,
|
||||
"ALIGN_COUNT": 13,
|
||||
"ALIGN_SUM": 14,
|
||||
"ALIGN_STDDEV": 15,
|
||||
"ALIGN_COUNT_TRUE": 16,
|
||||
"ALIGN_COUNT_FALSE": 24,
|
||||
"ALIGN_FRACTION_TRUE": 17,
|
||||
"ALIGN_PERCENTILE_99": 18,
|
||||
"ALIGN_PERCENTILE_95": 19,
|
||||
"ALIGN_PERCENTILE_50": 20,
|
||||
"ALIGN_PERCENTILE_05": 21,
|
||||
"ALIGN_PERCENT_CHANGE": 23,
|
||||
}
|
||||
|
||||
func (x Aggregation_Aligner) String() string {
|
||||
return proto.EnumName(Aggregation_Aligner_name, int32(x))
|
||||
}
|
||||
func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{2, 0}
|
||||
}
|
||||
|
||||
// A Reducer describes how to aggregate data points from multiple
|
||||
// time series into a single time series.
|
||||
type Aggregation_Reducer int32
|
||||
|
||||
const (
|
||||
// No cross-time series reduction. The output of the aligner is
|
||||
// returned.
|
||||
Aggregation_REDUCE_NONE Aggregation_Reducer = 0
|
||||
// Reduce by computing the mean across time series for each
|
||||
// alignment period. This reducer is valid for delta and
|
||||
// gauge metrics with numeric or distribution values. The value type of the
|
||||
// output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_REDUCE_MEAN Aggregation_Reducer = 1
|
||||
// Reduce by computing the minimum across time series for each
|
||||
// alignment period. This reducer is valid for delta and
|
||||
// gauge metrics with numeric values. The value type of the output
|
||||
// is the same as the value type of the input.
|
||||
Aggregation_REDUCE_MIN Aggregation_Reducer = 2
|
||||
// Reduce by computing the maximum across time series for each
|
||||
// alignment period. This reducer is valid for delta and
|
||||
// gauge metrics with numeric values. The value type of the output
|
||||
// is the same as the value type of the input.
|
||||
Aggregation_REDUCE_MAX Aggregation_Reducer = 3
|
||||
// Reduce by computing the sum across time series for each
|
||||
// alignment period. This reducer is valid for delta and
|
||||
// gauge metrics with numeric and distribution values. The value type of
|
||||
// the output is the same as the value type of the input.
|
||||
Aggregation_REDUCE_SUM Aggregation_Reducer = 4
|
||||
// Reduce by computing the standard deviation across time series
|
||||
// for each alignment period. This reducer is valid for delta
|
||||
// and gauge metrics with numeric or distribution values. The value type of
|
||||
// the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5
|
||||
// Reduce by computing the count of data points across time series
|
||||
// for each alignment period. This reducer is valid for delta
|
||||
// and gauge metrics of numeric, Boolean, distribution, and string value
|
||||
// type. The value type of the output is
|
||||
// [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_REDUCE_COUNT Aggregation_Reducer = 6
|
||||
// Reduce by computing the count of True-valued data points across time
|
||||
// series for each alignment period. This reducer is valid for delta
|
||||
// and gauge metrics of Boolean value type. The value type of
|
||||
// the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7
|
||||
// Reduce by computing the count of False-valued data points across time
|
||||
// series for each alignment period. This reducer is valid for delta
|
||||
// and gauge metrics of Boolean value type. The value type of
|
||||
// the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
|
||||
Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15
|
||||
// Reduce by computing the fraction of True-valued data points across time
|
||||
// series for each alignment period. This reducer is valid for delta
|
||||
// and gauge metrics of Boolean value type. The output value is in the
|
||||
// range [0, 1] and has value type
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
|
||||
Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8
|
||||
// Reduce by computing 99th percentile of data points across time series
|
||||
// for each alignment period. This reducer is valid for gauge and delta
|
||||
// metrics of numeric and distribution type. The value of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
|
||||
Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9
|
||||
// Reduce by computing 95th percentile of data points across time series
|
||||
// for each alignment period. This reducer is valid for gauge and delta
|
||||
// metrics of numeric and distribution type. The value of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
|
||||
Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10
|
||||
// Reduce by computing 50th percentile of data points across time series
|
||||
// for each alignment period. This reducer is valid for gauge and delta
|
||||
// metrics of numeric and distribution type. The value of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
|
||||
Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11
|
||||
// Reduce by computing 5th percentile of data points across time series
|
||||
// for each alignment period. This reducer is valid for gauge and delta
|
||||
// metrics of numeric and distribution type. The value of the output is
|
||||
// [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
|
||||
Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12
|
||||
)
|
||||
|
||||
var Aggregation_Reducer_name = map[int32]string{
|
||||
0: "REDUCE_NONE",
|
||||
1: "REDUCE_MEAN",
|
||||
2: "REDUCE_MIN",
|
||||
3: "REDUCE_MAX",
|
||||
4: "REDUCE_SUM",
|
||||
5: "REDUCE_STDDEV",
|
||||
6: "REDUCE_COUNT",
|
||||
7: "REDUCE_COUNT_TRUE",
|
||||
15: "REDUCE_COUNT_FALSE",
|
||||
8: "REDUCE_FRACTION_TRUE",
|
||||
9: "REDUCE_PERCENTILE_99",
|
||||
10: "REDUCE_PERCENTILE_95",
|
||||
11: "REDUCE_PERCENTILE_50",
|
||||
12: "REDUCE_PERCENTILE_05",
|
||||
}
|
||||
var Aggregation_Reducer_value = map[string]int32{
|
||||
"REDUCE_NONE": 0,
|
||||
"REDUCE_MEAN": 1,
|
||||
"REDUCE_MIN": 2,
|
||||
"REDUCE_MAX": 3,
|
||||
"REDUCE_SUM": 4,
|
||||
"REDUCE_STDDEV": 5,
|
||||
"REDUCE_COUNT": 6,
|
||||
"REDUCE_COUNT_TRUE": 7,
|
||||
"REDUCE_COUNT_FALSE": 15,
|
||||
"REDUCE_FRACTION_TRUE": 8,
|
||||
"REDUCE_PERCENTILE_99": 9,
|
||||
"REDUCE_PERCENTILE_95": 10,
|
||||
"REDUCE_PERCENTILE_50": 11,
|
||||
"REDUCE_PERCENTILE_05": 12,
|
||||
}
|
||||
|
||||
func (x Aggregation_Reducer) String() string {
|
||||
return proto.EnumName(Aggregation_Reducer_name, int32(x))
|
||||
}
|
||||
func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{2, 1}
|
||||
}
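
// exampleAggregation is an illustrative sketch, not part of the generated
// code: it builds an Aggregation (the message is defined later in this file)
// that aligns each time series to one-minute means and then averages across
// series. The 60s period and the MEAN choices are arbitrary, but they respect
// the constraints documented on the Aggregation fields: when a cross-series
// reducer is set, a per-series aligner other than ALIGN_NONE and an
// alignment period must also be specified.
func exampleAggregation() *Aggregation {
	return &Aggregation{
		AlignmentPeriod:    &duration.Duration{Seconds: 60},
		PerSeriesAligner:   Aggregation_ALIGN_MEAN,
		CrossSeriesReducer: Aggregation_REDUCE_MEAN,
	}
}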
|
||||
|
||||
// A single strongly-typed value.
|
||||
type TypedValue struct {
|
||||
// The typed value field.
|
||||
//
|
||||
// Types that are valid to be assigned to Value:
|
||||
// *TypedValue_BoolValue
|
||||
// *TypedValue_Int64Value
|
||||
// *TypedValue_DoubleValue
|
||||
// *TypedValue_StringValue
|
||||
// *TypedValue_DistributionValue
|
||||
Value isTypedValue_Value `protobuf_oneof:"value"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TypedValue) Reset() { *m = TypedValue{} }
|
||||
func (m *TypedValue) String() string { return proto.CompactTextString(m) }
|
||||
func (*TypedValue) ProtoMessage() {}
|
||||
func (*TypedValue) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{0}
|
||||
}
|
||||
func (m *TypedValue) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TypedValue.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TypedValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TypedValue.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *TypedValue) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TypedValue.Merge(dst, src)
|
||||
}
|
||||
func (m *TypedValue) XXX_Size() int {
|
||||
return xxx_messageInfo_TypedValue.Size(m)
|
||||
}
|
||||
func (m *TypedValue) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TypedValue.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TypedValue proto.InternalMessageInfo
|
||||
|
||||
type isTypedValue_Value interface {
|
||||
isTypedValue_Value()
|
||||
}
|
||||
|
||||
type TypedValue_BoolValue struct {
|
||||
BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||
}
|
||||
type TypedValue_Int64Value struct {
|
||||
Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
|
||||
}
|
||||
type TypedValue_DoubleValue struct {
|
||||
DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
|
||||
}
|
||||
type TypedValue_StringValue struct {
|
||||
StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||
}
|
||||
type TypedValue_DistributionValue struct {
|
||||
DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*TypedValue_BoolValue) isTypedValue_Value() {}
|
||||
func (*TypedValue_Int64Value) isTypedValue_Value() {}
|
||||
func (*TypedValue_DoubleValue) isTypedValue_Value() {}
|
||||
func (*TypedValue_StringValue) isTypedValue_Value() {}
|
||||
func (*TypedValue_DistributionValue) isTypedValue_Value() {}
|
||||
|
||||
func (m *TypedValue) GetValue() isTypedValue_Value {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TypedValue) GetBoolValue() bool {
|
||||
if x, ok := m.GetValue().(*TypedValue_BoolValue); ok {
|
||||
return x.BoolValue
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *TypedValue) GetInt64Value() int64 {
|
||||
if x, ok := m.GetValue().(*TypedValue_Int64Value); ok {
|
||||
return x.Int64Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TypedValue) GetDoubleValue() float64 {
|
||||
if x, ok := m.GetValue().(*TypedValue_DoubleValue); ok {
|
||||
return x.DoubleValue
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TypedValue) GetStringValue() string {
|
||||
if x, ok := m.GetValue().(*TypedValue_StringValue); ok {
|
||||
return x.StringValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *TypedValue) GetDistributionValue() *distribution.Distribution {
|
||||
if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok {
|
||||
return x.DistributionValue
|
||||
}
|
||||
return nil
|
||||
}
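
// exampleTypedValues is an illustrative sketch, not part of the generated
// code: the oneof is populated by assigning one of the wrapper types above
// to Value. The numbers and the string here are arbitrary placeholders.
func exampleTypedValues() []*TypedValue {
	return []*TypedValue{
		{Value: &TypedValue_DoubleValue{DoubleValue: 0.75}},
		{Value: &TypedValue_Int64Value{Int64Value: 42}},
		{Value: &TypedValue_StringValue{StringValue: "ok"}},
	}
}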
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*TypedValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _TypedValue_OneofMarshaler, _TypedValue_OneofUnmarshaler, _TypedValue_OneofSizer, []interface{}{
|
||||
(*TypedValue_BoolValue)(nil),
|
||||
(*TypedValue_Int64Value)(nil),
|
||||
(*TypedValue_DoubleValue)(nil),
|
||||
(*TypedValue_StringValue)(nil),
|
||||
(*TypedValue_DistributionValue)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _TypedValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*TypedValue)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *TypedValue_BoolValue:
|
||||
t := uint64(0)
|
||||
if x.BoolValue {
|
||||
t = 1
|
||||
}
|
||||
b.EncodeVarint(1<<3 | proto.WireVarint)
|
||||
b.EncodeVarint(t)
|
||||
case *TypedValue_Int64Value:
|
||||
b.EncodeVarint(2<<3 | proto.WireVarint)
|
||||
b.EncodeVarint(uint64(x.Int64Value))
|
||||
case *TypedValue_DoubleValue:
|
||||
b.EncodeVarint(3<<3 | proto.WireFixed64)
|
||||
b.EncodeFixed64(math.Float64bits(x.DoubleValue))
|
||||
case *TypedValue_StringValue:
|
||||
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.StringValue)
|
||||
case *TypedValue_DistributionValue:
|
||||
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.DistributionValue); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("TypedValue.Value has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _TypedValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*TypedValue)
|
||||
switch tag {
|
||||
case 1: // value.bool_value
|
||||
if wire != proto.WireVarint {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeVarint()
|
||||
m.Value = &TypedValue_BoolValue{x != 0}
|
||||
return true, err
|
||||
case 2: // value.int64_value
|
||||
if wire != proto.WireVarint {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeVarint()
|
||||
m.Value = &TypedValue_Int64Value{int64(x)}
|
||||
return true, err
|
||||
case 3: // value.double_value
|
||||
if wire != proto.WireFixed64 {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeFixed64()
|
||||
m.Value = &TypedValue_DoubleValue{math.Float64frombits(x)}
|
||||
return true, err
|
||||
case 4: // value.string_value
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Value = &TypedValue_StringValue{x}
|
||||
return true, err
|
||||
case 5: // value.distribution_value
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(distribution.Distribution)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Value = &TypedValue_DistributionValue{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _TypedValue_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*TypedValue)
|
||||
// value
|
||||
switch x := m.Value.(type) {
|
||||
case *TypedValue_BoolValue:
|
||||
n += 1 // tag and wire
|
||||
n += 1
|
||||
case *TypedValue_Int64Value:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(x.Int64Value))
|
||||
case *TypedValue_DoubleValue:
|
||||
n += 1 // tag and wire
|
||||
n += 8
|
||||
case *TypedValue_StringValue:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.StringValue)))
|
||||
n += len(x.StringValue)
|
||||
case *TypedValue_DistributionValue:
|
||||
s := proto.Size(x.DistributionValue)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// A time interval extending just after a start time through an end time.
|
||||
// If the start time is the same as the end time, then the interval
|
||||
// represents a single point in time.
|
||||
type TimeInterval struct {
|
||||
// Required. The end of the time interval.
|
||||
EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
|
||||
// Optional. The beginning of the time interval. The default value
|
||||
// for the start time is the end time. The start time must not be
|
||||
// later than the end time.
|
||||
StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TimeInterval) Reset() { *m = TimeInterval{} }
|
||||
func (m *TimeInterval) String() string { return proto.CompactTextString(m) }
|
||||
func (*TimeInterval) ProtoMessage() {}
|
||||
func (*TimeInterval) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{1}
|
||||
}
|
||||
func (m *TimeInterval) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TimeInterval.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *TimeInterval) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TimeInterval.Merge(dst, src)
|
||||
}
|
||||
func (m *TimeInterval) XXX_Size() int {
|
||||
return xxx_messageInfo_TimeInterval.Size(m)
|
||||
}
|
||||
func (m *TimeInterval) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TimeInterval.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TimeInterval proto.InternalMessageInfo
|
||||
|
||||
func (m *TimeInterval) GetEndTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.EndTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TimeInterval) GetStartTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.StartTime
|
||||
}
|
||||
return nil
|
||||
}
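// Illustrative sketch (not part of the generated file): a TimeInterval covering
// the five minutes ending at endSeconds (a Unix timestamp). In real code the
// *timestamp.Timestamp values are usually converted from time.Time (for example
// with the github.com/golang/protobuf/ptypes helpers); direct construction is
// shown here to stay within this file's imports.
func exampleTimeInterval(endSeconds int64) *TimeInterval {
	// EndTime is required. StartTime is optional and defaults to EndTime,
	// which would make the interval a single point in time.
	return &TimeInterval{
		EndTime:   &timestamp.Timestamp{Seconds: endSeconds},
		StartTime: &timestamp.Timestamp{Seconds: endSeconds - 300},
	}
}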
|
||||
|
||||
// Describes how to combine multiple time series to provide different views of
|
||||
// the data. Aggregation consists of an alignment step on individual time
|
||||
// series (`alignment_period` and `per_series_aligner`) followed by an optional
|
||||
// reduction step of the data across the aligned time series
|
||||
// (`cross_series_reducer` and `group_by_fields`). For more details, see
|
||||
// [Aggregation](/monitoring/api/learn_more#aggregation).
|
||||
type Aggregation struct {
|
||||
// The alignment period for per-[time series][google.monitoring.v3.TimeSeries]
|
||||
// alignment. If present, `alignmentPeriod` must be at least 60
|
||||
// seconds. After per-time series alignment, each time series will
|
||||
// contain data points only on the period boundaries. If
|
||||
// `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then
|
||||
// this field is ignored. If `perSeriesAligner` is specified and
|
||||
// does not equal `ALIGN_NONE`, then this field must be defined;
|
||||
// otherwise an error is returned.
|
||||
AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"`
|
||||
// The approach to be used to align individual time series. Not all
|
||||
// alignment functions may be applied to all time series, depending
|
||||
// on the metric type and value type of the original time
|
||||
// series. Alignment may change the metric type or the value type of
|
||||
// the time series.
|
||||
//
|
||||
// Time series data must be aligned in order to perform cross-time
|
||||
// series reduction. If `crossSeriesReducer` is specified, then
|
||||
// `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
|
||||
// and `alignmentPeriod` must be specified; otherwise, an error is
|
||||
// returned.
|
||||
PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"`
|
||||
// The approach to be used to combine time series. Not all reducer
|
||||
// functions may be applied to all time series, depending on the
|
||||
// metric type and the value type of the original time
|
||||
// series. Reduction may change the metric type or the value type of the
|
||||
// time series.
|
||||
//
|
||||
// Time series data must be aligned in order to perform cross-time
|
||||
// series reduction. If `crossSeriesReducer` is specified, then
|
||||
// `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
|
||||
// and `alignmentPeriod` must be specified; otherwise, an error is
|
||||
// returned.
|
||||
CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"`
|
||||
// The set of fields to preserve when `crossSeriesReducer` is
|
||||
// specified. The `groupByFields` determine how the time series are
|
||||
// partitioned into subsets prior to applying the aggregation
|
||||
// function. Each subset contains time series that have the same
|
||||
// value for each of the grouping fields. Each individual time
|
||||
// series is a member of exactly one subset. The
|
||||
// `crossSeriesReducer` is applied to each subset of time series.
|
||||
// It is not possible to reduce across different resource types, so
|
||||
// this field implicitly contains `resource.type`. Fields not
|
||||
// specified in `groupByFields` are aggregated away. If
|
||||
// `groupByFields` is not specified and all the time series have
|
||||
// the same resource type, then the time series are aggregated into
|
||||
// a single output time series. If `crossSeriesReducer` is not
|
||||
// defined, this field is ignored.
|
||||
GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Aggregation) Reset() { *m = Aggregation{} }
|
||||
func (m *Aggregation) String() string { return proto.CompactTextString(m) }
|
||||
func (*Aggregation) ProtoMessage() {}
|
||||
func (*Aggregation) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_common_006e316316847821, []int{2}
|
||||
}
|
||||
func (m *Aggregation) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Aggregation.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Aggregation.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Aggregation) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Aggregation.Merge(dst, src)
|
||||
}
|
||||
func (m *Aggregation) XXX_Size() int {
|
||||
return xxx_messageInfo_Aggregation.Size(m)
|
||||
}
|
||||
func (m *Aggregation) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Aggregation.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Aggregation proto.InternalMessageInfo
|
||||
|
||||
func (m *Aggregation) GetAlignmentPeriod() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.AlignmentPeriod
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner {
|
||||
if m != nil {
|
||||
return m.PerSeriesAligner
|
||||
}
|
||||
return Aggregation_ALIGN_NONE
|
||||
}
|
||||
|
||||
func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer {
|
||||
if m != nil {
|
||||
return m.CrossSeriesReducer
|
||||
}
|
||||
return Aggregation_REDUCE_NONE
|
||||
}
|
||||
|
||||
func (m *Aggregation) GetGroupByFields() []string {
|
||||
if m != nil {
|
||||
return m.GroupByFields
|
||||
}
|
||||
return nil
|
||||
}
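// Illustrative sketch (not part of the generated file): an Aggregation that
// aligns each time series into 60-second means and then averages across
// series grouped by zone. ALIGN_MEAN and REDUCE_MEAN are values of the
// Aligner and Reducer enums registered below (only ALIGN_NONE and REDUCE_NONE
// appear in this excerpt); the group-by field is a placeholder.
func exampleAggregation() *Aggregation {
	return &Aggregation{
		// Must be at least 60 seconds when a per-series aligner is set.
		AlignmentPeriod:    &duration.Duration{Seconds: 60},
		PerSeriesAligner:   Aggregation_ALIGN_MEAN,
		CrossSeriesReducer: Aggregation_REDUCE_MEAN,
		GroupByFields:      []string{"resource.label.zone"},
	}
}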
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue")
|
||||
proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval")
|
||||
proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation")
|
||||
proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value)
|
||||
proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value)
|
||||
proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value)
|
||||
proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor_common_006e316316847821)
|
||||
}
|
||||
|
||||
var fileDescriptor_common_006e316316847821 = []byte{
|
||||
// 954 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44,
|
||||
0x18, 0xc7, 0xe3, 0xa4, 0x6d, 0x9a, 0xcf, 0x6d, 0x33, 0x9d, 0xed, 0x76, 0x43, 0xb5, 0xb0, 0xd9,
|
||||
0x22, 0xa1, 0xb0, 0x07, 0xa7, 0x6a, 0x09, 0x52, 0x85, 0x84, 0xe4, 0x3a, 0xd3, 0xd6, 0x52, 0xe2,
|
||||
0x84, 0x89, 0x53, 0x2a, 0x28, 0xb2, 0x9c, 0x66, 0xd6, 0xb2, 0x94, 0x78, 0x2c, 0xdb, 0xa9, 0xd4,
|
||||
0x1b, 0x77, 0xde, 0x81, 0x0b, 0x37, 0x6e, 0xbc, 0x06, 0x0f, 0xc3, 0x85, 0x17, 0x40, 0x9e, 0x71,
|
||||
0xd6, 0x4e, 0x08, 0x62, 0x8f, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0xb7,
|
||||
0x1e, 0xe7, 0xde, 0x8c, 0xb5, 0xe7, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x7e, 0xba, 0x68,
|
||||
0x3f, 0xf2, 0xf9, 0x9c, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x23, 0x69, 0xd1, 0x72, 0x8b, 0xf6,
|
||||
0x74, 0x71, 0xf2, 0x3a, 0x6b, 0x74, 0x43, 0xbf, 0xed, 0x06, 0x01, 0x4f, 0xdc, 0xc4, 0xe7, 0x41,
|
||||
0x2c, 0x7b, 0x4e, 0x3e, 0x2d, 0xa8, 0x53, 0x3f, 0x4e, 0x22, 0x7f, 0xb2, 0x48, 0xf5, 0x4c, 0xfe,
|
||||
0x2c, 0x93, 0x45, 0x35, 0x59, 0xbc, 0x6f, 0x4f, 0x17, 0x91, 0x5b, 0xd0, 0xdf, 0xac, 0xeb, 0x89,
|
||||
0x3f, 0x67, 0x71, 0xe2, 0xce, 0x43, 0x69, 0x38, 0xfd, 0x4b, 0x01, 0xb0, 0x9f, 0x43, 0x36, 0xbd,
|
||||
0x73, 0x67, 0x0b, 0x86, 0xdf, 0x00, 0x4c, 0x38, 0x9f, 0x39, 0x4f, 0x69, 0xd5, 0x50, 0x9a, 0x4a,
|
||||
0x6b, 0xf7, 0xb6, 0x44, 0x6b, 0x29, 0x93, 0x86, 0xb7, 0xa0, 0xfa, 0x41, 0xf2, 0xf5, 0x57, 0x99,
|
||||
0xa3, 0xdc, 0x54, 0x5a, 0x95, 0xdb, 0x12, 0x05, 0x01, 0xa5, 0xe5, 0x73, 0xd8, 0x9b, 0xf2, 0xc5,
|
||||
0x64, 0xc6, 0x32, 0x4f, 0xa5, 0xa9, 0xb4, 0x94, 0xdb, 0x12, 0x55, 0x25, 0xfd, 0x60, 0x4a, 0x0f,
|
||||
0x13, 0x78, 0x99, 0x69, 0xab, 0xa9, 0xb4, 0x6a, 0xa9, 0x49, 0x52, 0x69, 0x32, 0x01, 0x17, 0xcf,
|
||||
0x9c, 0x59, 0xb7, 0x9b, 0x4a, 0x4b, 0x3d, 0x6f, 0x68, 0x59, 0x9a, 0x6e, 0xe8, 0x6b, 0xdd, 0x82,
|
||||
0xeb, 0xb6, 0x44, 0x0f, 0x8b, 0x5d, 0x62, 0xd4, 0x55, 0x15, 0xb6, 0x45, 0xf7, 0xe9, 0xcf, 0x0a,
|
||||
0xec, 0xd9, 0xfe, 0x9c, 0x99, 0x41, 0xc2, 0xa2, 0x27, 0x77, 0x86, 0x3b, 0xb0, 0xcb, 0x82, 0xa9,
|
||||
0x93, 0x06, 0x23, 0x8e, 0xa3, 0x9e, 0x9f, 0x2c, 0x47, 0x2f, 0x53, 0xd3, 0xec, 0x65, 0x6a, 0xb4,
|
||||
0xca, 0x82, 0x69, 0x5a, 0xe1, 0x4b, 0x80, 0x38, 0x71, 0xa3, 0x44, 0x36, 0x2a, 0xff, 0xdb, 0x58,
|
||||
0x13, 0xee, 0xb4, 0x3e, 0xfd, 0xbb, 0x0a, 0xaa, 0xee, 0x79, 0x11, 0xf3, 0xc4, 0x55, 0xe1, 0x2e,
|
||||
0x20, 0x77, 0xe6, 0x7b, 0xc1, 0x9c, 0x05, 0x89, 0x13, 0xb2, 0xc8, 0xe7, 0xd3, 0x6c, 0xe0, 0x27,
|
||||
0xff, 0x1a, 0xd8, 0xcd, 0xee, 0x97, 0xd6, 0x3f, 0xb4, 0x0c, 0x45, 0x07, 0xfe, 0x1e, 0x70, 0xc8,
|
||||
0x22, 0x27, 0x66, 0x91, 0xcf, 0x62, 0x47, 0xa8, 0x2c, 0x12, 0x27, 0x3a, 0x38, 0xff, 0x52, 0xdb,
|
||||
0xf4, 0xe9, 0x69, 0x85, 0x4d, 0x68, 0xba, 0x6c, 0xa0, 0x28, 0x64, 0xd1, 0x48, 0xcc, 0xc8, 0x08,
|
||||
0xfe, 0x11, 0x8e, 0x1e, 0x23, 0x1e, 0xc7, 0xcb, 0xd1, 0x11, 0x9b, 0x2e, 0x1e, 0x59, 0x24, 0xae,
|
||||
0xec, 0xa3, 0x46, 0x53, 0xd9, 0x40, 0xb1, 0x18, 0x23, 0x87, 0x67, 0x0c, 0x7f, 0x01, 0x75, 0x2f,
|
||||
0xe2, 0x8b, 0xd0, 0x99, 0x3c, 0x3b, 0xef, 0x7d, 0x36, 0x9b, 0xc6, 0x8d, 0xed, 0x66, 0xa5, 0x55,
|
||||
0xa3, 0xfb, 0x02, 0x5f, 0x3d, 0x5f, 0x0b, 0x78, 0xfa, 0x4b, 0x05, 0xaa, 0xcb, 0x0d, 0x1d, 0x00,
|
||||
0xe8, 0x3d, 0xf3, 0xc6, 0x72, 0xac, 0x81, 0x45, 0x50, 0x09, 0xd7, 0x41, 0x95, 0x75, 0x97, 0xf4,
|
||||
0x6c, 0x1d, 0x29, 0xb9, 0x81, 0xea, 0x36, 0x41, 0x65, 0xfc, 0x12, 0x0e, 0x65, 0x6d, 0x5a, 0x36,
|
||||
0xa1, 0xc3, 0x41, 0x2f, 0xc5, 0x15, 0x7c, 0x04, 0x28, 0x9b, 0x43, 0xee, 0x6d, 0x67, 0xd0, 0xeb,
|
||||
0x12, 0x8a, 0xb6, 0xf0, 0x3e, 0xd4, 0x24, 0xed, 0x9b, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35,
|
||||
0x1f, 0xdd, 0x27, 0xba, 0x85, 0xf6, 0xf2, 0xb5, 0x8d, 0xc1, 0xd8, 0xb2, 0xd1, 0x7e, 0xee, 0x1f,
|
||||
0x8d, 0xfb, 0xe8, 0x00, 0x23, 0xd8, 0xcb, 0x4a, 0xbb, 0xdb, 0x25, 0x77, 0xa8, 0x9e, 0xaf, 0x2a,
|
||||
0x3a, 0x1c, 0x9b, 0x8e, 0x09, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0x7b, 0x23, 0x82, 0x1a, 0xf8,
|
||||
0x15, 0xbc, 0x90, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2c, 0xe9, 0x3f, 0xcc, 0x85, 0x21, 0xa1,
|
||||
0x06, 0xb1, 0x6c, 0xb3, 0x47, 0x9c, 0xcb, 0x4b, 0x84, 0x37, 0x0b, 0x1d, 0xf4, 0x62, 0xa3, 0xd0,
|
||||
0x39, 0x43, 0x47, 0x1b, 0x85, 0xb3, 0x0e, 0x7a, 0x89, 0x1b, 0x70, 0xb4, 0x22, 0x38, 0xc6, 0xad,
|
||||
0x6e, 0xdd, 0x10, 0xf4, 0xea, 0xf4, 0x8f, 0x32, 0x54, 0x97, 0x37, 0x58, 0x07, 0x95, 0x92, 0xee,
|
||||
0xd8, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x12, 0x98, 0x16, 0x2a, 0x17, 0x6b,
|
||||
0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x16, 0x3e, 0x84, 0xfd, 0x65, 0x2d, 0x43, 0xdb, 0x4e,
|
||||
0x63, 0xcc, 0x90, 0xcc, 0x79, 0x27, 0x0d, 0xac, 0x48, 0x64, 0x2e, 0x55, 0x7c, 0x0c, 0x78, 0x05,
|
||||
0xcb, 0x20, 0xeb, 0xe9, 0x59, 0x32, 0xbe, 0x9a, 0xe4, 0x6e, 0x41, 0x59, 0x8d, 0xb2, 0xf6, 0x1f,
|
||||
0x4a, 0x07, 0xc1, 0x66, 0xa5, 0x73, 0x86, 0xd4, 0xcd, 0xca, 0x59, 0x07, 0xed, 0xbd, 0xfb, 0x55,
|
||||
0x81, 0x03, 0x83, 0xcf, 0x43, 0x37, 0xf2, 0x63, 0x1e, 0xa4, 0x6f, 0x2e, 0x3e, 0x81, 0x63, 0x63,
|
||||
0xd0, 0x1f, 0xea, 0xd4, 0x1c, 0x0d, 0x2c, 0x67, 0x6c, 0x8d, 0x86, 0xc4, 0x30, 0xaf, 0x4d, 0xd2,
|
||||
0x45, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x1d, 0xa5, 0x5f, 0xf6, 0x2a, 0xea, 0xd9,
|
||||
0xa8, 0xb2, 0x8e, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x7b, 0x0d, 0x59, 0x04, 0xed, 0xbc,
|
||||
0xfb, 0x09, 0xd4, 0x11, 0x8b, 0x9e, 0xfc, 0x47, 0x66, 0xfb, 0x2c, 0xc2, 0xaf, 0xa1, 0x31, 0x22,
|
||||
0xf4, 0xce, 0x34, 0x88, 0x63, 0x9b, 0x84, 0xae, 0x6d, 0xef, 0x18, 0xf0, 0x8a, 0x7a, 0xa5, 0x8f,
|
||||
0x4c, 0x03, 0x29, 0xe9, 0xf9, 0x57, 0xf8, 0x90, 0x92, 0xbe, 0x39, 0xee, 0xa3, 0xf2, 0xd5, 0x6f,
|
||||
0x0a, 0x34, 0x1e, 0xf9, 0x7c, 0xe3, 0x73, 0x71, 0xa5, 0x1a, 0xe2, 0x47, 0x39, 0x4c, 0x9f, 0xb9,
|
||||
0xa1, 0xf2, 0xc3, 0xb7, 0x99, 0xc9, 0xe3, 0x33, 0x37, 0xf0, 0x34, 0x1e, 0x79, 0x6d, 0x8f, 0x05,
|
||||
0xe2, 0x11, 0x6c, 0x4b, 0xc9, 0x0d, 0xfd, 0x78, 0xf5, 0x5f, 0xfb, 0x4d, 0x5e, 0xfd, 0x5e, 0x3e,
|
||||
0xb9, 0x91, 0x03, 0x8c, 0x19, 0x5f, 0x4c, 0xb5, 0x7e, 0xbe, 0xd6, 0xdd, 0xc5, 0x9f, 0x4b, 0xf1,
|
||||
0x41, 0x88, 0x0f, 0xb9, 0xf8, 0x70, 0x77, 0x31, 0xd9, 0x11, 0x8b, 0x5c, 0xfc, 0x13, 0x00, 0x00,
|
||||
0xff, 0xff, 0xe2, 0x9f, 0x67, 0xb2, 0xcf, 0x07, 0x00, 0x00,
|
||||
}
|
156
vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/group.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The description of a dynamic collection of monitored resources. Each group
|
||||
// has a filter that is matched against monitored resources and their associated
|
||||
// metadata. If a group's filter matches an available monitored resource, then
|
||||
// that resource is a member of that group. Groups can contain any number of
|
||||
// monitored resources, and each monitored resource can be a member of any
|
||||
// number of groups.
|
||||
//
|
||||
// Groups can be nested in parent-child hierarchies. The `parentName` field
|
||||
// identifies an optional parent for each group. If a group has a parent, then
|
||||
// the only monitored resources available to be matched by the group's filter
|
||||
// are the resources contained in the parent group. In other words, a group
|
||||
// contains the monitored resources that match its filter and the filters of all
|
||||
// the group's ancestors. A group without a parent can contain any monitored
|
||||
// resource.
|
||||
//
|
||||
// For example, consider an infrastructure running a set of instances with two
|
||||
// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
|
||||
// `environment="production"`. A child of that parent group has a filter,
|
||||
// `role="transcoder"`. The parent group contains all instances in the
|
||||
// production environment, regardless of their roles. The child group contains
|
||||
// instances that have the transcoder role *and* are in the production
|
||||
// environment.
|
||||
//
|
||||
// The monitored resources contained in a group can change at any moment,
|
||||
// depending on what resources exist and what filters are associated with the
|
||||
// group and its ancestors.
|
||||
type Group struct {
|
||||
// Output only. The name of this group. The format is
|
||||
// `"projects/{project_id_or_number}/groups/{group_id}"`.
|
||||
// When creating a group, this field is ignored and a new name is created
|
||||
// consisting of the project specified in the call to `CreateGroup`
|
||||
// and a unique `{group_id}` that is generated automatically.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// A user-assigned name for this group, used only for display purposes.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// The name of the group's parent, if it has one.
|
||||
// The format is `"projects/{project_id_or_number}/groups/{group_id}"`.
|
||||
// For groups with no parent, `parentName` is the empty string, `""`.
|
||||
ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
|
||||
// The filter used to determine which monitored resources belong to this group.
|
||||
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// If true, the members of this group are considered to be a cluster.
|
||||
// The system can perform additional analysis on groups that are clusters.
|
||||
IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Group) Reset() { *m = Group{} }
|
||||
func (m *Group) String() string { return proto.CompactTextString(m) }
|
||||
func (*Group) ProtoMessage() {}
|
||||
func (*Group) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_1b3b789bd5fc032e, []int{0}
|
||||
}
|
||||
func (m *Group) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Group.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Group.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Group) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Group.Merge(dst, src)
|
||||
}
|
||||
func (m *Group) XXX_Size() int {
|
||||
return xxx_messageInfo_Group.Size(m)
|
||||
}
|
||||
func (m *Group) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Group.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Group proto.InternalMessageInfo
|
||||
|
||||
func (m *Group) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Group) GetDisplayName() string {
|
||||
if m != nil {
|
||||
return m.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Group) GetParentName() string {
|
||||
if m != nil {
|
||||
return m.ParentName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Group) GetFilter() string {
|
||||
if m != nil {
|
||||
return m.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Group) GetIsCluster() bool {
|
||||
if m != nil {
|
||||
return m.IsCluster
|
||||
}
|
||||
return false
|
||||
}
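// Illustrative sketch (not part of the generated file): a Group definition as
// it might be passed to CreateGroup. Name is left empty because, per the field
// comment above, the system assigns it; the display name, parent name, and
// filter are placeholders.
func exampleGroup() *Group {
	return &Group{
		DisplayName: "Production GCE instances",
		ParentName:  "projects/my-project/groups/1234",
		Filter:      `resource.type = "gce_instance"`,
		IsCluster:   false,
	}
}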
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor_group_1b3b789bd5fc032e)
|
||||
}
|
||||
|
||||
var fileDescriptor_group_1b3b789bd5fc032e = []byte{
|
||||
// 261 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31,
|
||||
0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca,
|
||||
0x2c, 0xb2, 0x14, 0x5c, 0xb4, 0x8b, 0xae, 0x94, 0xd2, 0x45, 0x17, 0x32, 0x50, 0x62, 0x1b, 0x43,
|
||||
0x20, 0x93, 0x13, 0x92, 0x99, 0x82, 0x2f, 0xe2, 0x03, 0xb8, 0xf4, 0x51, 0x7c, 0x2a, 0x99, 0x93,
|
||||
0x91, 0x41, 0x70, 0x97, 0xf3, 0xfb, 0x3e, 0x72, 0xfe, 0x40, 0xa9, 0x11, 0xb5, 0x55, 0x55, 0x83,
|
||||
0xce, 0xb4, 0x18, 0x8c, 0xd3, 0xd5, 0x49, 0x54, 0x3a, 0x60, 0xe7, 0xb9, 0x0f, 0xd8, 0x62, 0x7e,
|
||||
0x91, 0x0c, 0x3e, 0x1a, 0xfc, 0x24, 0xee, 0xde, 0x19, 0x4c, 0xd7, 0xbd, 0x95, 0xe7, 0xf0, 0xdf,
|
||||
0xc9, 0x46, 0x15, 0xac, 0x64, 0x8b, 0xd9, 0x96, 0xde, 0xf9, 0x2d, 0x9c, 0x1f, 0x4d, 0xf4, 0x56,
|
||||
0xbe, 0xed, 0x89, 0x4d, 0x88, 0xcd, 0x87, 0xec, 0xa9, 0x57, 0x6e, 0x60, 0xee, 0x65, 0x50, 0xae,
|
||||
0x4d, 0xc6, 0x3f, 0x32, 0x20, 0x45, 0x24, 0x5c, 0x42, 0xf6, 0x6a, 0x6c, 0xab, 0x42, 0x31, 0x25,
|
||||
0x36, 0x54, 0xf9, 0x35, 0x80, 0x89, 0xfb, 0x83, 0xed, 0x62, 0xcf, 0xb2, 0x92, 0x2d, 0xce, 0xb6,
|
||||
0x33, 0x13, 0x57, 0x29, 0x58, 0x7e, 0x30, 0x28, 0x0e, 0xd8, 0xf0, 0xbf, 0xa6, 0x5e, 0x02, 0x8d,
|
||||
0xbc, 0xe9, 0xf7, 0xda, 0xb0, 0xe7, 0x87, 0xc1, 0xd1, 0x68, 0xa5, 0xd3, 0x1c, 0x83, 0xae, 0xb4,
|
||||
0x72, 0xb4, 0x75, 0x95, 0x90, 0xf4, 0x26, 0xfe, 0x3e, 0xcd, 0xfd, 0x58, 0x7d, 0x4e, 0xae, 0xd6,
|
||||
0xe9, 0x83, 0x95, 0xc5, 0xee, 0xc8, 0x1f, 0xc7, 0x56, 0x3b, 0xf1, 0xf5, 0x03, 0x6b, 0x82, 0xf5,
|
||||
0x08, 0xeb, 0x9d, 0x78, 0xc9, 0xa8, 0x89, 0xf8, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x95, 0xd1, 0xa1,
|
||||
0x34, 0x7e, 0x01, 0x00, 0x00,
|
||||
}
|
937
vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,937 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/group_service.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import empty "github.com/golang/protobuf/ptypes/empty"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The `ListGroups` request.
|
||||
type ListGroupsRequest struct {
|
||||
// The project whose groups are to be listed. The format is
|
||||
// `"projects/{project_id_or_number}"`.
|
||||
Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// An optional filter consisting of a single group name. The filters limit the
|
||||
// groups returned based on their parent-child relationship with the specified
|
||||
// group. If no filter is specified, all groups are returned.
|
||||
//
|
||||
// Types that are valid to be assigned to Filter:
|
||||
// *ListGroupsRequest_ChildrenOfGroup
|
||||
// *ListGroupsRequest_AncestorsOfGroup
|
||||
// *ListGroupsRequest_DescendantsOfGroup
|
||||
Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"`
|
||||
// A positive number that is the maximum number of results to return.
|
||||
PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// If this field is not empty then it must contain the `nextPageToken` value
|
||||
// returned by a previous call to this method. Using this field causes the
|
||||
// method to return additional results from the previous method call.
|
||||
PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} }
|
||||
func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListGroupsRequest) ProtoMessage() {}
|
||||
func (*ListGroupsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{0}
|
||||
}
|
||||
func (m *ListGroupsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListGroupsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListGroupsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListGroupsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListGroupsRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *ListGroupsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListGroupsRequest.Size(m)
|
||||
}
|
||||
func (m *ListGroupsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListGroupsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListGroupsRequest proto.InternalMessageInfo
|
||||
|
||||
type isListGroupsRequest_Filter interface {
|
||||
isListGroupsRequest_Filter()
|
||||
}
|
||||
|
||||
type ListGroupsRequest_ChildrenOfGroup struct {
|
||||
ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"`
|
||||
}
|
||||
type ListGroupsRequest_AncestorsOfGroup struct {
|
||||
AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"`
|
||||
}
|
||||
type ListGroupsRequest_DescendantsOfGroup struct {
|
||||
DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {}
|
||||
func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {}
|
||||
func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {}
|
||||
|
||||
func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter {
|
||||
if m != nil {
|
||||
return m.Filter
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetChildrenOfGroup() string {
|
||||
if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok {
|
||||
return x.ChildrenOfGroup
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetAncestorsOfGroup() string {
|
||||
if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok {
|
||||
return x.AncestorsOfGroup
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetDescendantsOfGroup() string {
|
||||
if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok {
|
||||
return x.DescendantsOfGroup
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetPageSize() int32 {
|
||||
if m != nil {
|
||||
return m.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ListGroupsRequest) GetPageToken() string {
|
||||
if m != nil {
|
||||
return m.PageToken
|
||||
}
|
||||
return ""
|
||||
}
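// Illustrative sketch (not part of the generated file): the Filter field is a
// oneof, so a request that asks for the children of a particular group wraps
// the group name in ListGroupsRequest_ChildrenOfGroup. The project and group
// IDs are placeholders.
func exampleListGroupsRequest() *ListGroupsRequest {
	return &ListGroupsRequest{
		Name: "projects/my-project",
		Filter: &ListGroupsRequest_ChildrenOfGroup{
			ChildrenOfGroup: "projects/my-project/groups/parent-group",
		},
		PageSize: 100,
	}
}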
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*ListGroupsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _ListGroupsRequest_OneofMarshaler, _ListGroupsRequest_OneofUnmarshaler, _ListGroupsRequest_OneofSizer, []interface{}{
|
||||
(*ListGroupsRequest_ChildrenOfGroup)(nil),
|
||||
(*ListGroupsRequest_AncestorsOfGroup)(nil),
|
||||
(*ListGroupsRequest_DescendantsOfGroup)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _ListGroupsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*ListGroupsRequest)
|
||||
// filter
|
||||
switch x := m.Filter.(type) {
|
||||
case *ListGroupsRequest_ChildrenOfGroup:
|
||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.ChildrenOfGroup)
|
||||
case *ListGroupsRequest_AncestorsOfGroup:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.AncestorsOfGroup)
|
||||
case *ListGroupsRequest_DescendantsOfGroup:
|
||||
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||
b.EncodeStringBytes(x.DescendantsOfGroup)
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("ListGroupsRequest.Filter has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _ListGroupsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*ListGroupsRequest)
|
||||
switch tag {
|
||||
case 2: // filter.children_of_group
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Filter = &ListGroupsRequest_ChildrenOfGroup{x}
|
||||
return true, err
|
||||
case 3: // filter.ancestors_of_group
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Filter = &ListGroupsRequest_AncestorsOfGroup{x}
|
||||
return true, err
|
||||
case 4: // filter.descendants_of_group
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
x, err := b.DecodeStringBytes()
|
||||
m.Filter = &ListGroupsRequest_DescendantsOfGroup{x}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _ListGroupsRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*ListGroupsRequest)
|
||||
// filter
|
||||
switch x := m.Filter.(type) {
|
||||
case *ListGroupsRequest_ChildrenOfGroup:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.ChildrenOfGroup)))
|
||||
n += len(x.ChildrenOfGroup)
|
||||
case *ListGroupsRequest_AncestorsOfGroup:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.AncestorsOfGroup)))
|
||||
n += len(x.AncestorsOfGroup)
|
||||
case *ListGroupsRequest_DescendantsOfGroup:
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.DescendantsOfGroup)))
|
||||
n += len(x.DescendantsOfGroup)
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// The `ListGroups` response.
|
||||
type ListGroupsResponse struct {
|
||||
// The groups that match the specified filters.
|
||||
Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"`
|
||||
// If there are more results than have been returned, then this field is set
|
||||
// to a non-empty value. To see the additional results,
|
||||
// use that value as `pageToken` in the next call to this method.
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} }
|
||||
func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListGroupsResponse) ProtoMessage() {}
|
||||
func (*ListGroupsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{1}
|
||||
}
|
||||
func (m *ListGroupsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListGroupsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListGroupsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListGroupsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListGroupsResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *ListGroupsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListGroupsResponse.Size(m)
|
||||
}
|
||||
func (m *ListGroupsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListGroupsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListGroupsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListGroupsResponse) GetGroup() []*Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListGroupsResponse) GetNextPageToken() string {
|
||||
if m != nil {
|
||||
return m.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The `GetGroup` request.
|
||||
type GetGroupRequest struct {
|
||||
// The group to retrieve. The format is
|
||||
// `"projects/{project_id_or_number}/groups/{group_id}"`.
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} }
|
||||
func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetGroupRequest) ProtoMessage() {}
|
||||
func (*GetGroupRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{2}
|
||||
}
|
||||
func (m *GetGroupRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GetGroupRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GetGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GetGroupRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *GetGroupRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GetGroupRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *GetGroupRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_GetGroupRequest.Size(m)
|
||||
}
|
||||
func (m *GetGroupRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GetGroupRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GetGroupRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *GetGroupRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The `CreateGroup` request.
|
||||
type CreateGroupRequest struct {
|
||||
// The project in which to create the group. The format is
|
||||
// `"projects/{project_id_or_number}"`.
|
||||
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// A group definition. It is an error to define the `name` field because
|
||||
// the system assigns the name.
|
||||
Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
|
||||
// If true, validate this request but do not create the group.
|
||||
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} }
|
||||
func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateGroupRequest) ProtoMessage() {}
|
||||
func (*CreateGroupRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{3}
|
||||
}
|
||||
func (m *CreateGroupRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CreateGroupRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CreateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CreateGroupRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *CreateGroupRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CreateGroupRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *CreateGroupRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_CreateGroupRequest.Size(m)
|
||||
}
|
||||
func (m *CreateGroupRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CreateGroupRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CreateGroupRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *CreateGroupRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CreateGroupRequest) GetGroup() *Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CreateGroupRequest) GetValidateOnly() bool {
|
||||
if m != nil {
|
||||
return m.ValidateOnly
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// The `UpdateGroup` request.
|
||||
type UpdateGroupRequest struct {
|
||||
// The new definition of the group. All fields of the existing group,
|
||||
// excepting `name`, are replaced with the corresponding fields of this group.
|
||||
Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
|
||||
// If true, validate this request but do not update the existing group.
|
||||
ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} }
|
||||
func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*UpdateGroupRequest) ProtoMessage() {}
|
||||
func (*UpdateGroupRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{4}
|
||||
}
|
||||
func (m *UpdateGroupRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UpdateGroupRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UpdateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UpdateGroupRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UpdateGroupRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UpdateGroupRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *UpdateGroupRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_UpdateGroupRequest.Size(m)
|
||||
}
|
||||
func (m *UpdateGroupRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UpdateGroupRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UpdateGroupRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *UpdateGroupRequest) GetGroup() *Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UpdateGroupRequest) GetValidateOnly() bool {
|
||||
if m != nil {
|
||||
return m.ValidateOnly
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// The `DeleteGroup` request. You can only delete a group if it has no children.
|
||||
type DeleteGroupRequest struct {
|
||||
// The group to delete. The format is
|
||||
// `"projects/{project_id_or_number}/groups/{group_id}"`.
|
||||
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} }
|
||||
func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteGroupRequest) ProtoMessage() {}
|
||||
func (*DeleteGroupRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{5}
|
||||
}
|
||||
func (m *DeleteGroupRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_DeleteGroupRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *DeleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_DeleteGroupRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *DeleteGroupRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DeleteGroupRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *DeleteGroupRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_DeleteGroupRequest.Size(m)
|
||||
}
|
||||
func (m *DeleteGroupRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DeleteGroupRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DeleteGroupRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *DeleteGroupRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The `ListGroupMembers` request.
|
||||
type ListGroupMembersRequest struct {
|
||||
// The group whose members are listed. The format is
|
||||
// `"projects/{project_id_or_number}/groups/{group_id}"`.
|
||||
Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// A positive number that is the maximum number of results to return.
|
||||
PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// If this field is not empty then it must contain the `nextPageToken` value
|
||||
// returned by a previous call to this method. Using this field causes the
|
||||
// method to return additional results from the previous method call.
|
||||
PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
// An optional [list filter](/monitoring/api/learn_more#filtering) describing
|
||||
// the members to be returned. The filter may reference the type, labels, and
|
||||
// metadata of monitored resources that comprise the group.
|
||||
// For example, to return only resources representing Compute Engine VM
|
||||
// instances, use this filter:
|
||||
//
|
||||
// resource.type = "gce_instance"
|
||||
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// An optional time interval for which results should be returned. Only
|
||||
// members that were part of the group during the specified interval are
|
||||
// included in the response. If no interval is provided then the group
|
||||
// membership over the last minute is returned.
|
||||
Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} }
|
||||
func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListGroupMembersRequest) ProtoMessage() {}
|
||||
func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{6}
|
||||
}
|
||||
func (m *ListGroupMembersRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListGroupMembersRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListGroupMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListGroupMembersRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListGroupMembersRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListGroupMembersRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *ListGroupMembersRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListGroupMembersRequest.Size(m)
|
||||
}
|
||||
func (m *ListGroupMembersRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListGroupMembersRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListGroupMembersRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListGroupMembersRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersRequest) GetPageSize() int32 {
|
||||
if m != nil {
|
||||
return m.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersRequest) GetPageToken() string {
|
||||
if m != nil {
|
||||
return m.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersRequest) GetFilter() string {
|
||||
if m != nil {
|
||||
return m.Filter
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersRequest) GetInterval() *TimeInterval {
|
||||
if m != nil {
|
||||
return m.Interval
|
||||
}
|
||||
return nil
|
||||
}
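// Illustrative sketch (not part of the generated file): listing the Compute
// Engine VM members of a group, using the filter example from the comment
// above. The interval argument can be built as shown next to the TimeInterval
// message in common.pb.go; the group name is a placeholder.
func exampleListGroupMembersRequest(interval *TimeInterval) *ListGroupMembersRequest {
	return &ListGroupMembersRequest{
		Name:     "projects/my-project/groups/1234",
		Filter:   `resource.type = "gce_instance"`,
		Interval: interval,
		PageSize: 100,
	}
}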
|
||||
|
||||
// The `ListGroupMembers` response.
|
||||
type ListGroupMembersResponse struct {
|
||||
// A set of monitored resources in the group.
|
||||
Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"`
|
||||
// If there are more results than have been returned, then this field is
|
||||
// set to a non-empty value. To see the additional results, use that value as
|
||||
// `pageToken` in the next call to this method.
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
// The total number of elements matching this request.
|
||||
TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} }
|
||||
func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListGroupMembersResponse) ProtoMessage() {}
|
||||
func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_group_service_9b35a79e21b496f3, []int{7}
|
||||
}
|
||||
func (m *ListGroupMembersResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListGroupMembersResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListGroupMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListGroupMembersResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListGroupMembersResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListGroupMembersResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *ListGroupMembersResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListGroupMembersResponse.Size(m)
|
||||
}
|
||||
func (m *ListGroupMembersResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListGroupMembersResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListGroupMembersResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource {
|
||||
if m != nil {
|
||||
return m.Members
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersResponse) GetNextPageToken() string {
|
||||
if m != nil {
|
||||
return m.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListGroupMembersResponse) GetTotalSize() int32 {
|
||||
if m != nil {
|
||||
return m.TotalSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest")
|
||||
proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse")
|
||||
proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest")
|
||||
proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest")
|
||||
proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest")
|
||||
proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest")
|
||||
proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest")
|
||||
proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse")
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// GroupServiceClient is the client API for GroupService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type GroupServiceClient interface {
|
||||
// Lists the existing groups.
|
||||
ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error)
|
||||
// Gets a single group.
|
||||
GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error)
|
||||
// Creates a new group.
|
||||
CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error)
|
||||
// Updates an existing group.
|
||||
// You can change any group attributes except `name`.
|
||||
UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error)
|
||||
// Deletes an existing group.
|
||||
DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error)
|
||||
// Lists the monitored resources that are members of a group.
|
||||
ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error)
|
||||
}
|
||||
|
||||
type groupServiceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewGroupServiceClient(cc *grpc.ClientConn) GroupServiceClient {
|
||||
return &groupServiceClient{cc}
|
||||
}
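// Illustrative sketch (not part of the generated file): paging through all
// groups in a project with the client above. It assumes the caller has already
// built an authenticated *grpc.ClientConn to the monitoring endpoint, which is
// outside the scope of this file; the project ID is a placeholder.
func exampleListAllGroups(ctx context.Context, conn *grpc.ClientConn) ([]*Group, error) {
	client := NewGroupServiceClient(conn)
	var groups []*Group
	req := &ListGroupsRequest{Name: "projects/my-project", PageSize: 100}
	for {
		resp, err := client.ListGroups(ctx, req)
		if err != nil {
			return nil, err
		}
		groups = append(groups, resp.Group...)
		// An empty next_page_token means there are no further pages.
		if resp.NextPageToken == "" {
			return groups, nil
		}
		req.PageToken = resp.NextPageToken
	}
}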
|
||||
|
||||
func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) {
|
||||
out := new(ListGroupsResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) {
|
||||
out := new(Group)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
|
||||
out := new(Group)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
|
||||
out := new(Group)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) {
|
||||
out := new(ListGroupMembersResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GroupServiceServer is the server API for GroupService service.
|
||||
type GroupServiceServer interface {
|
||||
// Lists the existing groups.
|
||||
ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error)
|
||||
// Gets a single group.
|
||||
GetGroup(context.Context, *GetGroupRequest) (*Group, error)
|
||||
// Creates a new group.
|
||||
CreateGroup(context.Context, *CreateGroupRequest) (*Group, error)
|
||||
// Updates an existing group.
|
||||
// You can change any group attributes except `name`.
|
||||
UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error)
|
||||
// Deletes an existing group.
|
||||
DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error)
|
||||
// Lists the monitored resources that are members of a group.
|
||||
ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error)
|
||||
}
|
||||
|
||||
func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) {
|
||||
s.RegisterService(&_GroupService_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListGroupsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).ListGroups(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/ListGroups",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetGroupRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).GetGroup(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/GetGroup",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateGroupRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).CreateGroup(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/CreateGroup",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateGroupRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).UpdateGroup(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteGroupRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).DeleteGroup(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListGroupMembersRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GroupServiceServer).ListGroupMembers(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _GroupService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.monitoring.v3.GroupService",
|
||||
HandlerType: (*GroupServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ListGroups",
|
||||
Handler: _GroupService_ListGroups_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetGroup",
|
||||
Handler: _GroupService_GetGroup_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateGroup",
|
||||
Handler: _GroupService_CreateGroup_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateGroup",
|
||||
Handler: _GroupService_UpdateGroup_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteGroup",
|
||||
Handler: _GroupService_DeleteGroup_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListGroupMembers",
|
||||
Handler: _GroupService_ListGroupMembers_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/monitoring/v3/group_service.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor_group_service_9b35a79e21b496f3)
|
||||
}
|
||||
|
||||
var fileDescriptor_group_service_9b35a79e21b496f3 = []byte{
|
||||
// 826 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c,
|
||||
0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7,
|
||||
0x83, 0xa8, 0x50, 0x5b, 0x24, 0x07, 0x24, 0x10, 0x3d, 0xb4, 0xa0, 0x82, 0x44, 0xd5, 0xca, 0x2d,
|
||||
0x3d, 0xa0, 0x4a, 0x91, 0x9b, 0x4c, 0x8c, 0xc1, 0xde, 0x35, 0xf6, 0x26, 0xd0, 0xa2, 0x4a, 0x80,
|
||||
0xc4, 0x81, 0x33, 0x37, 0x6e, 0x1c, 0xe1, 0x2f, 0x70, 0xe2, 0xca, 0x95, 0xbf, 0xc0, 0xff, 0x00,
|
||||
0x79, 0xbd, 0x9b, 0x38, 0x9f, 0xed, 0x85, 0x5b, 0xb2, 0xf3, 0x8c, 0x9f, 0x67, 0x66, 0x9f, 0x99,
|
||||
0x45, 0x25, 0x9b, 0x52, 0xdb, 0x05, 0xc3, 0xa3, 0xc4, 0x61, 0x34, 0x70, 0x88, 0x6d, 0xb4, 0x2a,
|
||||
0x86, 0x1d, 0xd0, 0xa6, 0x5f, 0x0d, 0x21, 0x68, 0x39, 0x35, 0xd0, 0xfd, 0x80, 0x32, 0x8a, 0xe7,
|
||||
0x62, 0xa4, 0xde, 0x41, 0xea, 0xad, 0x8a, 0xba, 0x28, 0xf2, 0x2d, 0xdf, 0x31, 0x2c, 0x42, 0x28,
|
||||
0xb3, 0x98, 0x43, 0x49, 0x18, 0xe7, 0xa8, 0x2b, 0x89, 0xa8, 0xc8, 0x83, 0x7a, 0x35, 0x80, 0x90,
|
||||
0x36, 0x03, 0xf9, 0x61, 0xf5, 0xda, 0x40, 0x09, 0x35, 0xea, 0x79, 0x94, 0x08, 0x48, 0x71, 0xb8,
|
||||
0x4a, 0x81, 0x58, 0x10, 0x08, 0xfe, 0xef, 0xb4, 0xd9, 0x30, 0xc0, 0xf3, 0xd9, 0x59, 0x1c, 0xd4,
|
||||
0xfe, 0x28, 0x68, 0xf6, 0xb1, 0x13, 0xb2, 0xdd, 0x28, 0x21, 0x34, 0xe1, 0x65, 0x13, 0x42, 0x86,
|
||||
0x31, 0x4a, 0x13, 0xcb, 0x83, 0xc2, 0x44, 0x51, 0x29, 0xe5, 0x4c, 0xfe, 0x1b, 0xdf, 0x44, 0xb3,
|
||||
0xb5, 0x67, 0x8e, 0x5b, 0x0f, 0x80, 0x54, 0x69, 0xa3, 0xca, 0x19, 0x0a, 0x63, 0x11, 0xe0, 0xe1,
|
||||
0x7f, 0xe6, 0xb4, 0x0c, 0xed, 0x37, 0xf8, 0x97, 0xb0, 0x8e, 0xb0, 0x45, 0x6a, 0x10, 0x32, 0x1a,
|
||||
0x84, 0x1d, 0x78, 0x4a, 0xc0, 0x67, 0xda, 0x31, 0x89, 0x2f, 0xa3, 0xb9, 0x3a, 0x84, 0x35, 0x20,
|
||||
0x75, 0x8b, 0xb0, 0x44, 0x46, 0x5a, 0x64, 0xe0, 0x44, 0x54, 0xe6, 0x2c, 0xa0, 0x9c, 0x6f, 0xd9,
|
||||
0x50, 0x0d, 0x9d, 0x73, 0x28, 0x8c, 0x17, 0x95, 0xd2, 0xb8, 0x99, 0x8d, 0x0e, 0x0e, 0x9d, 0x73,
|
||||
0xc0, 0x4b, 0x08, 0xf1, 0x20, 0xa3, 0x2f, 0x80, 0x14, 0x32, 0xbc, 0x10, 0x0e, 0x3f, 0x8a, 0x0e,
|
||||
0xb6, 0xb3, 0x28, 0xd3, 0x70, 0x5c, 0x06, 0x81, 0x46, 0x11, 0x4e, 0x36, 0x20, 0xf4, 0x29, 0x09,
|
||||
0x01, 0xdf, 0x42, 0xe3, 0xb1, 0x00, 0xa5, 0x98, 0x2a, 0xe5, 0xcb, 0x0b, 0xfa, 0xa0, 0x2b, 0xd6,
|
||||
0x79, 0x92, 0x19, 0x23, 0xf1, 0x3a, 0x9a, 0x26, 0xf0, 0x9a, 0x55, 0x13, 0xb4, 0xbc, 0x3d, 0xe6,
|
||||
0x54, 0x74, 0x7c, 0x20, 0xa9, 0xb5, 0x35, 0x34, 0xbd, 0x0b, 0x31, 0x5f, 0x6f, 0xbf, 0x53, 0x9d,
|
||||
0x7e, 0x6b, 0x6f, 0x15, 0x84, 0x77, 0x02, 0xb0, 0x18, 0x0c, 0x84, 0xa6, 0x13, 0x57, 0xd3, 0x16,
|
||||
0x1b, 0xf1, 0x5d, 0x4d, 0xec, 0x0a, 0x9a, 0x6a, 0x59, 0xae, 0x53, 0xb7, 0x18, 0x54, 0x29, 0x71,
|
||||
0xcf, 0x38, 0x75, 0xd6, 0x9c, 0x94, 0x87, 0xfb, 0xc4, 0x3d, 0xd3, 0x5c, 0x84, 0x9f, 0xf8, 0xf5,
|
||||
0x5e, 0x05, 0xff, 0x8a, 0xad, 0x84, 0xf0, 0x7d, 0x70, 0x61, 0x48, 0xbd, 0xc9, 0xd6, 0xfc, 0x50,
|
||||
0xd0, 0xff, 0xed, 0x3b, 0xdb, 0x03, 0xef, 0x14, 0x82, 0x91, 0xd6, 0xed, 0x32, 0x4a, 0x6a, 0xa4,
|
||||
0x51, 0xd2, 0x3d, 0x46, 0xc1, 0xf3, 0xd2, 0x28, 0xdc, 0x61, 0x39, 0x53, 0xfc, 0xc3, 0x5b, 0x28,
|
||||
0xeb, 0x10, 0x06, 0x41, 0xcb, 0x72, 0xb9, 0xbb, 0xf2, 0x65, 0x6d, 0x70, 0x23, 0x8e, 0x1c, 0x0f,
|
||||
0x1e, 0x09, 0xa4, 0xd9, 0xce, 0xd1, 0x3e, 0x2b, 0xa8, 0xd0, 0x5f, 0x83, 0x70, 0xdf, 0x6d, 0x34,
|
||||
0xe1, 0xc5, 0x47, 0xc2, 0x7f, 0x4b, 0xf2, 0xdb, 0x96, 0xef, 0xe8, 0x7b, 0x72, 0x5d, 0x98, 0x62,
|
||||
0x5b, 0x98, 0x12, 0x7d, 0x55, 0x0f, 0x46, 0x45, 0x33, 0xca, 0x2c, 0x37, 0xd9, 0x92, 0x1c, 0x3f,
|
||||
0x89, 0x7a, 0x52, 0xfe, 0x9e, 0x41, 0x93, 0x5c, 0xd8, 0x61, 0xbc, 0xe7, 0xf0, 0x07, 0x05, 0xa1,
|
||||
0xce, 0x94, 0xe0, 0xeb, 0x83, 0x4b, 0xed, 0x5b, 0x24, 0x6a, 0xe9, 0x72, 0x60, 0x5c, 0xb2, 0xb6,
|
||||
0xfa, 0xfe, 0xd7, 0xef, 0x4f, 0x63, 0xcb, 0x78, 0x31, 0x5a, 0x5f, 0x6f, 0xa2, 0x6b, 0xbb, 0xe7,
|
||||
0x07, 0xf4, 0x39, 0xd4, 0x58, 0x68, 0x6c, 0x5c, 0xc4, 0x0b, 0x2d, 0xc4, 0x2d, 0x94, 0x95, 0xb3,
|
||||
0x83, 0xd7, 0x86, 0x18, 0xaf, 0x7b, 0xb6, 0xd4, 0x51, 0xfe, 0xd4, 0xd6, 0x39, 0x6b, 0x11, 0x2f,
|
||||
0x0f, 0x62, 0x15, 0xa4, 0xc6, 0xc6, 0x05, 0x7e, 0xa7, 0xa0, 0x7c, 0x62, 0x18, 0xf1, 0x90, 0xba,
|
||||
0xfa, 0xe7, 0x75, 0x34, 0xfd, 0x0d, 0x4e, 0xbf, 0xa6, 0x8d, 0x2c, 0xfa, 0x8e, 0x18, 0xa2, 0x8f,
|
||||
0x0a, 0xca, 0x27, 0xc6, 0x71, 0x98, 0x86, 0xfe, 0x89, 0x1d, 0xad, 0xa1, 0xc2, 0x35, 0x6c, 0xaa,
|
||||
0xab, 0x5c, 0x43, 0xfc, 0x70, 0x0c, 0x6d, 0x84, 0xd4, 0xf2, 0x0a, 0xe5, 0x13, 0xb3, 0x3a, 0x4c,
|
||||
0x4a, 0xff, 0x38, 0xab, 0xf3, 0x12, 0x29, 0x5f, 0x23, 0xfd, 0x41, 0xf4, 0x1a, 0xc9, 0x8b, 0xd8,
|
||||
0xb8, 0xec, 0x22, 0xbe, 0x28, 0x68, 0xa6, 0x77, 0x6c, 0xf0, 0xe6, 0x25, 0x2e, 0xeb, 0x5e, 0x11,
|
||||
0xaa, 0x7e, 0x55, 0xb8, 0xb0, 0xa6, 0xce, 0xb5, 0x95, 0xf0, 0xfa, 0x68, 0x6d, 0x86, 0x18, 0xc2,
|
||||
0xed, 0xaf, 0x0a, 0x2a, 0xd4, 0xa8, 0x37, 0x90, 0x65, 0x7b, 0x36, 0x39, 0x57, 0x07, 0x51, 0x13,
|
||||
0x0e, 0x94, 0xa7, 0x5b, 0x02, 0x6a, 0x53, 0xd7, 0x22, 0xb6, 0x4e, 0x03, 0xdb, 0xb0, 0x81, 0xf0,
|
||||
0x16, 0x19, 0x71, 0xc8, 0xf2, 0x9d, 0xb0, 0xfb, 0x8d, 0xbf, 0xdb, 0xf9, 0xf7, 0x6d, 0x4c, 0xdd,
|
||||
0x8d, 0x3f, 0xb0, 0xe3, 0xd2, 0x66, 0x5d, 0x2e, 0x88, 0x88, 0xf1, 0xb8, 0xf2, 0x53, 0x06, 0x4f,
|
||||
0x78, 0xf0, 0xa4, 0x13, 0x3c, 0x39, 0xae, 0x9c, 0x66, 0x38, 0x49, 0xe5, 0x6f, 0x00, 0x00, 0x00,
|
||||
0xff, 0xff, 0x86, 0x94, 0xf2, 0xde, 0xed, 0x08, 0x00, 0x00,
|
||||
}
|
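// Illustrative usage sketch (not part of the generated file): a minimal
// GroupServiceServer registered on a grpc.Server via RegisterGroupServiceServer
// above. The stubGroupServer type and serveGroups helper are hypothetical; the
// sketch assumes this file's imports plus "net", and embeds the interface so
// only the one method shown in this excerpt needs a body (calls to the others
// would panic on the nil embedded interface).
//
//	type stubGroupServer struct {
//		GroupServiceServer // embedded for brevity; a real server implements every method
//	}
//
//	func (stubGroupServer) ListGroupMembers(ctx context.Context, req *ListGroupMembersRequest) (*ListGroupMembersResponse, error) {
//		return &ListGroupMembersResponse{}, nil
//	}
//
//	func serveGroups(lis net.Listener) error {
//		s := grpc.NewServer()
//		RegisterGroupServiceServer(s, stubGroupServer{})
//		return s.Serve(lis)
//	}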
216
vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
generated
vendored
Normal file
@@ -0,0 +1,216 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/metric.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import metric "google.golang.org/genproto/googleapis/api/metric"
|
||||
import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A single data point in a time series.
|
||||
type Point struct {
|
||||
// The time interval to which the data point applies. For GAUGE metrics, only
|
||||
// the end time of the interval is used. For DELTA metrics, the start and end
|
||||
// time should specify a non-zero interval, with subsequent points specifying
|
||||
// contiguous and non-overlapping intervals. For CUMULATIVE metrics, the
|
||||
// start and end time should specify a non-zero interval, with subsequent
|
||||
// points specifying the same start time and increasing end times, until an
|
||||
// event resets the cumulative value to zero and sets a new start time for the
|
||||
// following points.
|
||||
Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
|
||||
// The value of the data point.
|
||||
Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Point) Reset() { *m = Point{} }
|
||||
func (m *Point) String() string { return proto.CompactTextString(m) }
|
||||
func (*Point) ProtoMessage() {}
|
||||
func (*Point) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metric_d0e473d254297f3e, []int{0}
|
||||
}
|
||||
func (m *Point) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Point.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Point.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Point) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Point.Merge(dst, src)
|
||||
}
|
||||
func (m *Point) XXX_Size() int {
|
||||
return xxx_messageInfo_Point.Size(m)
|
||||
}
|
||||
func (m *Point) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Point.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Point proto.InternalMessageInfo
|
||||
|
||||
func (m *Point) GetInterval() *TimeInterval {
|
||||
if m != nil {
|
||||
return m.Interval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Point) GetValue() *TypedValue {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A collection of data points that describes the time-varying values
|
||||
// of a metric. A time series is identified by a combination of a
|
||||
// fully-specified monitored resource and a fully-specified metric.
|
||||
// This type is used for both listing and creating time series.
|
||||
type TimeSeries struct {
|
||||
// The associated metric. A fully-specified metric used to identify the time
|
||||
// series.
|
||||
Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"`
|
||||
// The associated resource. A fully-specified monitored resource used to
|
||||
// identify the time series.
|
||||
Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
|
||||
// The metric kind of the time series. When listing time series, this metric
|
||||
// kind might be different from the metric kind of the associated metric if
|
||||
// this time series is an alignment or reduction of other time series.
|
||||
//
|
||||
// When creating a time series, this field is optional. If present, it must be
|
||||
// the same as the metric kind of the associated metric. If the associated
|
||||
// metric's descriptor must be auto-created, then this field specifies the
|
||||
// metric kind of the new descriptor and must be either `GAUGE` (the default)
|
||||
// or `CUMULATIVE`.
|
||||
MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
|
||||
// The value type of the time series. When listing time series, this value
|
||||
// type might be different from the value type of the associated metric if
|
||||
// this time series is an alignment or reduction of other time series.
|
||||
//
|
||||
// When creating a time series, this field is optional. If present, it must be
|
||||
// the same as the type of the data in the `points` field.
|
||||
ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
|
||||
// The data points of this time series. When listing time series, the order of
|
||||
// the points is specified by the list method.
|
||||
//
|
||||
// When creating a time series, this field must contain exactly one point and
|
||||
// the point's type must be the same as the value type of the associated
|
||||
// metric. If the associated metric's descriptor must be auto-created, then
|
||||
// the value type of the descriptor is determined by the point's type, which
|
||||
// must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
|
||||
Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
|
||||
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
|
||||
func (*TimeSeries) ProtoMessage() {}
|
||||
func (*TimeSeries) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metric_d0e473d254297f3e, []int{1}
|
||||
}
|
||||
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *TimeSeries) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TimeSeries.Merge(dst, src)
|
||||
}
|
||||
func (m *TimeSeries) XXX_Size() int {
|
||||
return xxx_messageInfo_TimeSeries.Size(m)
|
||||
}
|
||||
func (m *TimeSeries) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TimeSeries.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
|
||||
|
||||
func (m *TimeSeries) GetMetric() *metric.Metric {
|
||||
if m != nil {
|
||||
return m.Metric
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TimeSeries) GetResource() *monitoredres.MonitoredResource {
|
||||
if m != nil {
|
||||
return m.Resource
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind {
|
||||
if m != nil {
|
||||
return m.MetricKind
|
||||
}
|
||||
return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (m *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType {
|
||||
if m != nil {
|
||||
return m.ValueType
|
||||
}
|
||||
return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (m *TimeSeries) GetPoints() []*Point {
|
||||
if m != nil {
|
||||
return m.Points
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point")
|
||||
proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor_metric_d0e473d254297f3e)
|
||||
}
|
||||
|
||||
var fileDescriptor_metric_d0e473d254297f3e = []byte{
|
||||
// 396 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xeb, 0x40,
|
||||
0x14, 0x86, 0x49, 0x7b, 0x5b, 0x7a, 0x27, 0x70, 0x17, 0xc3, 0x05, 0x43, 0x45, 0x88, 0x15, 0xb4,
|
||||
0xb8, 0x48, 0xa0, 0x01, 0x41, 0x84, 0x2e, 0xaa, 0xa2, 0x22, 0x42, 0x19, 0xa5, 0x0b, 0x29, 0x94,
|
||||
0x98, 0x0c, 0x61, 0x30, 0x99, 0x33, 0x4c, 0xd2, 0x40, 0x57, 0x3e, 0x8c, 0x3b, 0xdf, 0xc0, 0x57,
|
||||
0xf0, 0xa9, 0x24, 0x33, 0x93, 0xd6, 0x62, 0x74, 0x37, 0xc9, 0xff, 0x9d, 0xff, 0x9f, 0x73, 0xce,
|
||||
0xa0, 0xfd, 0x04, 0x20, 0x49, 0xa9, 0x9f, 0x01, 0x67, 0x05, 0x48, 0xc6, 0x13, 0xbf, 0x0c, 0xfc,
|
||||
0x8c, 0x16, 0x92, 0x45, 0x9e, 0x90, 0x50, 0x00, 0xfe, 0xaf, 0x11, 0x6f, 0x83, 0x78, 0x65, 0xd0,
|
||||
0xdf, 0x31, 0x85, 0xa1, 0x60, 0x5b, 0x78, 0xff, 0xe0, 0xab, 0xa0, 0x4b, 0x68, 0xbc, 0x90, 0x34,
|
||||
0x87, 0xa5, 0x8c, 0xa8, 0x81, 0x9a, 0x63, 0x23, 0xc8, 0x32, 0xe0, 0x1a, 0x19, 0xbc, 0xa0, 0xce,
|
||||
0x14, 0x18, 0x2f, 0xf0, 0x18, 0xf5, 0x18, 0x2f, 0xa8, 0x2c, 0xc3, 0xd4, 0xb1, 0x5c, 0x6b, 0x68,
|
||||
0x8f, 0x06, 0x5e, 0xd3, 0x95, 0xbc, 0x07, 0x96, 0xd1, 0x1b, 0x43, 0x92, 0x75, 0x0d, 0x3e, 0x41,
|
||||
0x9d, 0x32, 0x4c, 0x97, 0xd4, 0x69, 0xa9, 0x62, 0xf7, 0x87, 0xe2, 0x95, 0xa0, 0xf1, 0xac, 0xe2,
|
||||
0x88, 0xc6, 0x07, 0xef, 0x2d, 0x84, 0x2a, 0xcb, 0x7b, 0x2a, 0x19, 0xcd, 0xf1, 0x31, 0xea, 0xea,
|
||||
0x3e, 0xcd, 0x25, 0x70, 0xed, 0x13, 0x0a, 0xe6, 0xdd, 0x29, 0x85, 0x18, 0x02, 0x9f, 0xa2, 0x5e,
|
||||
0xdd, 0xb0, 0x49, 0xdd, 0xdb, 0xa2, 0xeb, 0xb1, 0x10, 0x03, 0x91, 0x35, 0x8e, 0xaf, 0x91, 0xad,
|
||||
0x4d, 0x16, 0xcf, 0x8c, 0xc7, 0x4e, 0xdb, 0xb5, 0x86, 0xff, 0x46, 0x47, 0xdf, 0xb3, 0x2e, 0x68,
|
||||
0x1e, 0x49, 0x26, 0x0a, 0x90, 0xe6, 0xc7, 0x2d, 0xe3, 0x31, 0x41, 0xd9, 0xfa, 0x8c, 0x2f, 0x11,
|
||||
0x52, 0x8d, 0x2c, 0x8a, 0x95, 0xa0, 0xce, 0x1f, 0x65, 0x74, 0xf8, 0xab, 0x91, 0x6a, 0xbf, 0x1a,
|
||||
0x04, 0xf9, 0x5b, 0xd6, 0x47, 0x1c, 0xa0, 0xae, 0xa8, 0xf6, 0x90, 0x3b, 0x1d, 0xb7, 0x3d, 0xb4,
|
||||
0x47, 0xbb, 0xcd, 0xf3, 0x53, 0xbb, 0x22, 0x06, 0x9d, 0xbc, 0x5a, 0xc8, 0x89, 0x20, 0x6b, 0x44,
|
||||
0x27, 0xb6, 0x0e, 0x9e, 0x56, 0x6b, 0x9e, 0x5a, 0x8f, 0x63, 0x03, 0x25, 0x90, 0x86, 0x3c, 0xf1,
|
||||
0x40, 0x26, 0x7e, 0x42, 0xb9, 0x7a, 0x04, 0xbe, 0x96, 0x42, 0xc1, 0xf2, 0xed, 0xa7, 0x72, 0xb6,
|
||||
0xf9, 0x7a, 0x6b, 0xf5, 0xaf, 0xb4, 0xc1, 0x79, 0x0a, 0xcb, 0xb8, 0x1e, 0x6e, 0x95, 0x35, 0x0b,
|
||||
0x3e, 0x6a, 0x71, 0xae, 0xc4, 0xf9, 0x46, 0x9c, 0xcf, 0x82, 0xa7, 0xae, 0x0a, 0x09, 0x3e, 0x03,
|
||||
0x00, 0x00, 0xff, 0xff, 0x28, 0x45, 0x7a, 0x13, 0x05, 0x03, 0x00, 0x00,
|
||||
}
|
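// Illustrative usage sketch (not part of the generated file): building a
// TimeSeries with a single GAUGE point. TypedValue, TimeInterval and the
// TypedValue_DoubleValue oneof wrapper live in this package's common.pb.go
// (not shown here) and are assumed; the metric type, labels and the
// ptypes.TimestampNow helper (github.com/golang/protobuf/ptypes) are
// assumptions for the example only.
//
//	now := ptypes.TimestampNow()
//	ts := &TimeSeries{
//		Metric: &metric.Metric{
//			Type:   "custom.googleapis.com/my_metric",
//			Labels: map[string]string{"env": "prod"},
//		},
//		Resource: &monitoredres.MonitoredResource{
//			Type:   "global",
//			Labels: map[string]string{"project_id": "my-project"},
//		},
//		Points: []*Point{{
//			Interval: &TimeInterval{EndTime: now},
//			Value:    &TypedValue{Value: &TypedValue_DoubleValue{DoubleValue: 3.14}},
//		}},
//	}
//	_ = ts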
1195
vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
97
vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/mutation_record.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Describes a change made to a configuration.
|
||||
type MutationRecord struct {
|
||||
// When the change occurred.
|
||||
MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"`
|
||||
// The email address of the user making the change.
|
||||
MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MutationRecord) Reset() { *m = MutationRecord{} }
|
||||
func (m *MutationRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*MutationRecord) ProtoMessage() {}
|
||||
func (*MutationRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_mutation_record_7a7b59a768928dc9, []int{0}
|
||||
}
|
||||
func (m *MutationRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MutationRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MutationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MutationRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *MutationRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MutationRecord.Merge(dst, src)
|
||||
}
|
||||
func (m *MutationRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_MutationRecord.Size(m)
|
||||
}
|
||||
func (m *MutationRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MutationRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MutationRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *MutationRecord) GetMutateTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.MutateTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MutationRecord) GetMutatedBy() string {
|
||||
if m != nil {
|
||||
return m.MutatedBy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor_mutation_record_7a7b59a768928dc9)
|
||||
}
|
||||
|
||||
var fileDescriptor_mutation_record_7a7b59a768928dc9 = []byte{
|
||||
// 251 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33,
|
||||
0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a,
|
||||
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33,
|
||||
0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c,
|
||||
0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c,
|
||||
0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e,
|
||||
0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83,
|
||||
0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce,
|
||||
0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c,
|
||||
0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4,
|
||||
0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4,
|
||||
0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87,
|
||||
0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32,
|
||||
0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00,
|
||||
0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00,
|
||||
}
|
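// Illustrative usage sketch (not part of the generated file): recording who
// changed a configuration and when. ptypes.TimestampNow comes from
// github.com/golang/protobuf/ptypes and the email address is an assumption.
//
//	rec := &MutationRecord{
//		MutateTime: ptypes.TimestampNow(),
//		MutatedBy:  "alice@example.com",
//	}
//	fmt.Println(rec.GetMutatedBy(), rec.GetMutateTime())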
367
vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
generated
vendored
Normal file
@@ -0,0 +1,367 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/notification.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import wrappers "github.com/golang/protobuf/ptypes/wrappers"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import label "google.golang.org/genproto/googleapis/api/label"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Indicates whether the channel has been verified or not. It is illegal
|
||||
// to specify this field in a
|
||||
// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
|
||||
// or an
|
||||
// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
|
||||
// operation.
|
||||
type NotificationChannel_VerificationStatus int32
|
||||
|
||||
const (
|
||||
// Sentinel value used to indicate that the state is unknown, omitted, or
|
||||
// is not applicable (as in the case of channels that neither support
|
||||
// nor require verification in order to function).
|
||||
NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
|
||||
// The channel has yet to be verified and requires verification to function.
|
||||
// Note that this state also applies to the case where the verification
|
||||
// process has been initiated by sending a verification code but where
|
||||
// the verification code has not been submitted to complete the process.
|
||||
NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
|
||||
// It has been proven that notifications can be received on this
|
||||
// notification channel and that someone on the project has access
|
||||
// to messages that are delivered to that channel.
|
||||
NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
|
||||
)
|
||||
|
||||
var NotificationChannel_VerificationStatus_name = map[int32]string{
|
||||
0: "VERIFICATION_STATUS_UNSPECIFIED",
|
||||
1: "UNVERIFIED",
|
||||
2: "VERIFIED",
|
||||
}
|
||||
var NotificationChannel_VerificationStatus_value = map[string]int32{
|
||||
"VERIFICATION_STATUS_UNSPECIFIED": 0,
|
||||
"UNVERIFIED": 1,
|
||||
"VERIFIED": 2,
|
||||
}
|
||||
|
||||
func (x NotificationChannel_VerificationStatus) String() string {
|
||||
return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x))
|
||||
}
|
||||
func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_notification_22cdd47ecea3076c, []int{1, 0}
|
||||
}
|
||||
|
||||
// A description of a notification channel. The descriptor includes
|
||||
// the properties of the channel and the set of labels or fields that
|
||||
// must be specified to configure channels of a given type.
|
||||
type NotificationChannelDescriptor struct {
|
||||
// The full REST resource name for this descriptor. The syntax is:
|
||||
//
|
||||
// projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE]
|
||||
//
|
||||
// In the above, `[TYPE]` is the value of the `type` field.
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// The type of notification channel, such as "email", "sms", etc.
|
||||
// Notification channel types are globally unique.
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
// A human-readable name for the notification channel type. This
|
||||
// form of the name is suitable for a user interface.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// A human-readable description of the notification channel
|
||||
// type. The description may include a description of the properties
|
||||
// of the channel and pointers to external documentation.
|
||||
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// The set of labels that must be defined to identify a particular
|
||||
// channel of the corresponding type. Each label includes a
|
||||
// description for how that field should be populated.
|
||||
Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
|
||||
// The tiers that support this notification channel; the project service tier
|
||||
// must be one of the supported_tiers.
|
||||
SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} }
|
||||
func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) }
|
||||
func (*NotificationChannelDescriptor) ProtoMessage() {}
|
||||
func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_notification_22cdd47ecea3076c, []int{0}
|
||||
}
|
||||
func (m *NotificationChannelDescriptor) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_NotificationChannelDescriptor.Unmarshal(m, b)
|
||||
}
|
||||
func (m *NotificationChannelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_NotificationChannelDescriptor.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *NotificationChannelDescriptor) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_NotificationChannelDescriptor.Merge(dst, src)
|
||||
}
|
||||
func (m *NotificationChannelDescriptor) XXX_Size() int {
|
||||
return xxx_messageInfo_NotificationChannelDescriptor.Size(m)
|
||||
}
|
||||
func (m *NotificationChannelDescriptor) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_NotificationChannelDescriptor.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_NotificationChannelDescriptor proto.InternalMessageInfo
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetType() string {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetDisplayName() string {
|
||||
if m != nil {
|
||||
return m.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetDescription() string {
|
||||
if m != nil {
|
||||
return m.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor {
|
||||
if m != nil {
|
||||
return m.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
|
||||
if m != nil {
|
||||
return m.SupportedTiers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A `NotificationChannel` is a medium through which an alert is
|
||||
// delivered when a policy violation is detected. Examples of channels
|
||||
// include email, SMS, and third-party messaging applications. Fields
|
||||
// containing sensitive information like authentication tokens or
|
||||
// contact info are only partially populated on retrieval.
|
||||
type NotificationChannel struct {
|
||||
// The type of the notification channel. This field matches the
|
||||
// value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field.
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
// The full REST resource name for this channel. The syntax is:
|
||||
//
|
||||
// projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
|
||||
//
|
||||
// The `[CHANNEL_ID]` is automatically assigned by the server on creation.
|
||||
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// An optional human-readable name for this notification channel. It is
|
||||
// recommended that you specify a non-empty and unique name in order to
|
||||
// make it easier to identify the channels in your project, though this is
|
||||
// not enforced. The display name is limited to 512 Unicode characters.
|
||||
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// An optional human-readable description of this notification channel. This
|
||||
// description may provide additional details, beyond the display
|
||||
// name, for the channel. This may not exceed 1024 Unicode characters.
|
||||
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// Configuration fields that define the channel and its behavior. The
|
||||
// permissible and required labels are specified in the
|
||||
// [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the
|
||||
// `NotificationChannelDescriptor` corresponding to the `type` field.
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// User-supplied key/value data that does not need to conform to
|
||||
// the corresponding `NotificationChannelDescriptor`'s schema, unlike
|
||||
// the `labels` field. This field is intended to be used for organizing
|
||||
// and identifying the `NotificationChannel` objects.
|
||||
//
|
||||
// The field can contain up to 64 entries. Each key and value is limited to
|
||||
// 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
|
||||
// values can contain only lowercase letters, numerals, underscores, and
|
||||
// dashes. Keys must begin with a letter.
|
||||
UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Indicates whether this channel has been verified or not. On a
|
||||
// [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
|
||||
// or
|
||||
// [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
|
||||
// operation, this field is expected to be populated.
|
||||
//
|
||||
// If the value is `UNVERIFIED`, then it indicates that the channel is
|
||||
// non-functioning (it both requires verification and lacks verification);
|
||||
// otherwise, it is assumed that the channel works.
|
||||
//
|
||||
// If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
|
||||
// the channel is of a type that does not require verification or that
|
||||
// this specific channel has been exempted from verification because it was
|
||||
// created prior to verification being required for channels of this type.
|
||||
//
|
||||
// This field cannot be modified using a standard
|
||||
// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
|
||||
// operation. To change the value of this field, you must call
|
||||
// [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
|
||||
VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
|
||||
// Whether notifications are forwarded to the described channel. This makes
|
||||
// it possible to disable delivery of notifications to a particular channel
|
||||
// without removing the channel from all alerting policies that reference
|
||||
// the channel. This is a more convenient approach when the change is
|
||||
// temporary and you want to receive notifications from the same set
|
||||
// of alerting policies on the channel at some point in the future.
|
||||
Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) Reset() { *m = NotificationChannel{} }
|
||||
func (m *NotificationChannel) String() string { return proto.CompactTextString(m) }
|
||||
func (*NotificationChannel) ProtoMessage() {}
|
||||
func (*NotificationChannel) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_notification_22cdd47ecea3076c, []int{1}
|
||||
}
|
||||
func (m *NotificationChannel) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_NotificationChannel.Unmarshal(m, b)
|
||||
}
|
||||
func (m *NotificationChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_NotificationChannel.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *NotificationChannel) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_NotificationChannel.Merge(dst, src)
|
||||
}
|
||||
func (m *NotificationChannel) XXX_Size() int {
|
||||
return xxx_messageInfo_NotificationChannel.Size(m)
|
||||
}
|
||||
func (m *NotificationChannel) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_NotificationChannel.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_NotificationChannel proto.InternalMessageInfo
|
||||
|
||||
func (m *NotificationChannel) GetType() string {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetDisplayName() string {
|
||||
if m != nil {
|
||||
return m.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetDescription() string {
|
||||
if m != nil {
|
||||
return m.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetLabels() map[string]string {
|
||||
if m != nil {
|
||||
return m.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetUserLabels() map[string]string {
|
||||
if m != nil {
|
||||
return m.UserLabels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
|
||||
if m != nil {
|
||||
return m.VerificationStatus
|
||||
}
|
||||
return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (m *NotificationChannel) GetEnabled() *wrappers.BoolValue {
|
||||
if m != nil {
|
||||
return m.Enabled
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor")
|
||||
proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel")
|
||||
proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.LabelsEntry")
|
||||
proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.UserLabelsEntry")
|
||||
proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor_notification_22cdd47ecea3076c)
|
||||
}
|
||||
|
||||
var fileDescriptor_notification_22cdd47ecea3076c = []byte{
|
||||
// 599 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6e, 0xd3, 0x30,
|
||||
0x14, 0xc7, 0x49, 0xbb, 0x8e, 0xcd, 0x99, 0xba, 0xe1, 0x4d, 0x28, 0x0a, 0x5f, 0xdd, 0xb8, 0xa0,
|
||||
0x57, 0x89, 0xd4, 0x82, 0xc4, 0xf8, 0x92, 0xb6, 0xae, 0x43, 0x45, 0xac, 0x4c, 0xfd, 0x42, 0x9a,
|
||||
0x26, 0x55, 0x6e, 0xeb, 0x05, 0x8b, 0xc4, 0x8e, 0x6c, 0x27, 0xa8, 0x0f, 0xc1, 0x63, 0x70, 0x01,
|
||||
0x8f, 0xc2, 0x53, 0xa1, 0x38, 0x6e, 0x12, 0xb6, 0x48, 0x8c, 0x3b, 0x9f, 0x73, 0xfe, 0xe7, 0x7f,
|
||||
0xce, 0xf9, 0x35, 0x2a, 0x78, 0xe6, 0x31, 0xe6, 0xf9, 0xd8, 0x0d, 0x18, 0x25, 0x92, 0x71, 0x42,
|
||||
0x3d, 0x37, 0x6e, 0xbb, 0x94, 0x49, 0x72, 0x45, 0xe6, 0x48, 0x12, 0x46, 0x9d, 0x90, 0x33, 0xc9,
|
||||
0xe0, 0x5e, 0x2a, 0x74, 0x72, 0xa1, 0x13, 0xb7, 0xed, 0x87, 0xba, 0x1d, 0x85, 0xc4, 0x45, 0x94,
|
||||
0x32, 0xa9, 0x5a, 0x44, 0xda, 0x63, 0xdf, 0x2f, 0x54, 0x7d, 0x34, 0xc3, 0xbe, 0xce, 0xef, 0x97,
|
||||
0x0e, 0x9d, 0xb3, 0x20, 0x58, 0x8d, 0xb3, 0x1f, 0x6b, 0x89, 0x8a, 0x66, 0xd1, 0x95, 0xfb, 0x8d,
|
||||
0xa3, 0x30, 0xc4, 0x5c, 0x5b, 0x1f, 0x7c, 0xaf, 0x80, 0x47, 0xfd, 0xc2, 0x96, 0x9d, 0x2f, 0x88,
|
||||
0x52, 0xec, 0x9f, 0x60, 0x31, 0xe7, 0x24, 0x94, 0x8c, 0x43, 0x08, 0xd6, 0x28, 0x0a, 0xb0, 0xb5,
|
||||
0xde, 0x30, 0x9a, 0x9b, 0x03, 0xf5, 0x4e, 0x72, 0x72, 0x19, 0x62, 0xcb, 0x48, 0x73, 0xc9, 0x1b,
|
||||
0xee, 0x83, 0xad, 0x05, 0x11, 0xa1, 0x8f, 0x96, 0x53, 0xa5, 0xaf, 0xa8, 0x9a, 0xa9, 0x73, 0xfd,
|
||||
0xa4, 0xad, 0x01, 0xcc, 0x85, 0x36, 0x26, 0x8c, 0x5a, 0x55, 0xad, 0xc8, 0x53, 0xb0, 0x0d, 0xd6,
|
||||
0xd5, 0x81, 0xc2, 0x5a, 0x6b, 0x54, 0x9b, 0x66, 0xeb, 0x81, 0xa3, 0x71, 0xa1, 0x90, 0x38, 0x1f,
|
||||
0x93, 0x4a, 0xbe, 0xd9, 0x40, 0x4b, 0xe1, 0x07, 0xb0, 0x2d, 0xa2, 0x30, 0x64, 0x5c, 0xe2, 0xc5,
|
||||
0x54, 0x12, 0xcc, 0x85, 0x55, 0x6b, 0x54, 0x9b, 0xf5, 0xd6, 0xbe, 0x53, 0x06, 0xdb, 0x19, 0x62,
|
||||
0x1e, 0x93, 0x39, 0x1e, 0x11, 0xcc, 0x07, 0xf5, 0xac, 0x33, 0x09, 0xc5, 0xc1, 0x8f, 0x1a, 0xd8,
|
||||
0x2d, 0xe1, 0x51, 0x7a, 0x71, 0x19, 0x99, 0xeb, 0x14, 0xaa, 0xff, 0xa4, 0xb0, 0x76, 0x93, 0xc2,
|
||||
0x59, 0x46, 0xa1, 0xa6, 0x28, 0xbc, 0x28, 0xbf, 0xa3, 0x64, 0xcf, 0x94, 0x91, 0xe8, 0x52, 0xc9,
|
||||
0x97, 0x19, 0x9f, 0x0b, 0x60, 0x46, 0x02, 0xf3, 0xa9, 0xf6, 0xdc, 0x50, 0x9e, 0x87, 0xb7, 0xf7,
|
||||
0x1c, 0x0b, 0xcc, 0x8b, 0xbe, 0x20, 0xca, 0x12, 0x30, 0x00, 0xbb, 0x31, 0xe6, 0x59, 0xcb, 0x54,
|
||||
0x48, 0x24, 0x23, 0x61, 0x6d, 0x36, 0x8c, 0x66, 0xbd, 0xf5, 0xe6, 0xf6, 0x33, 0x26, 0x05, 0x93,
|
||||
0xa1, 0xf2, 0x18, 0xc0, 0xf8, 0x46, 0x0e, 0x3e, 0x07, 0x77, 0x31, 0x45, 0x33, 0x1f, 0x2f, 0x2c,
|
||||
0xb3, 0x61, 0x34, 0xcd, 0x96, 0xbd, 0x1a, 0xb1, 0xfa, 0xc0, 0x9d, 0x63, 0xc6, 0xfc, 0x09, 0xf2,
|
||||
0x23, 0x3c, 0x58, 0x49, 0xed, 0x43, 0x60, 0x16, 0xf6, 0x87, 0x3b, 0xa0, 0xfa, 0x15, 0x2f, 0xf5,
|
||||
0x4f, 0x99, 0x3c, 0xe1, 0x1e, 0xa8, 0xc5, 0x49, 0x8b, 0xfe, 0x68, 0xd3, 0xe0, 0x55, 0xe5, 0xa5,
|
||||
0x61, 0xbf, 0x05, 0xdb, 0xd7, 0xce, 0xff, 0x9f, 0xf6, 0x83, 0xcf, 0x00, 0xde, 0xbc, 0x0c, 0x3e,
|
||||
0x05, 0x4f, 0x26, 0xdd, 0x41, 0xef, 0xb4, 0xd7, 0x39, 0x1a, 0xf5, 0x3e, 0xf5, 0xa7, 0xc3, 0xd1,
|
||||
0xd1, 0x68, 0x3c, 0x9c, 0x8e, 0xfb, 0xc3, 0xf3, 0x6e, 0xa7, 0x77, 0xda, 0xeb, 0x9e, 0xec, 0xdc,
|
||||
0x81, 0x75, 0x00, 0xc6, 0xfd, 0x54, 0xd6, 0x3d, 0xd9, 0x31, 0xe0, 0x16, 0xd8, 0xc8, 0xa2, 0xca,
|
||||
0xf1, 0x4f, 0x03, 0x58, 0x73, 0x16, 0x94, 0x02, 0x3e, 0xbe, 0x57, 0x24, 0x7c, 0x9e, 0x80, 0x39,
|
||||
0x37, 0x2e, 0xde, 0x69, 0xa9, 0xc7, 0x7c, 0x44, 0x3d, 0x87, 0x71, 0xcf, 0xf5, 0x30, 0x55, 0xd8,
|
||||
0xdc, 0xb4, 0x84, 0x42, 0x22, 0xfe, 0xfe, 0x2f, 0x79, 0x9d, 0x47, 0xbf, 0x2a, 0xf6, 0xfb, 0xd4,
|
||||
0xa0, 0xe3, 0xb3, 0x68, 0xe1, 0x9c, 0xe5, 0x13, 0x27, 0xed, 0xdf, 0xab, 0xe2, 0xa5, 0x2a, 0x5e,
|
||||
0xe6, 0xc5, 0xcb, 0x49, 0x7b, 0xb6, 0xae, 0x86, 0xb4, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdf,
|
||||
0xb9, 0x3f, 0x8b, 0x24, 0x05, 0x00, 0x00,
|
||||
}
|
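// Illustrative usage sketch (not part of the generated file): an email channel
// with delivery enabled. The "email" type and its "email_address" label are
// assumptions about the corresponding channel descriptor; only the
// NotificationChannel fields defined above are taken from this file.
//
//	ch := &NotificationChannel{
//		Type:        "email",
//		DisplayName: "On-call alerts",
//		Labels:      map[string]string{"email_address": "oncall@example.com"},
//		UserLabels:  map[string]string{"team": "sre"},
//		Enabled:     &wrappers.BoolValue{Value: true},
//	}
//	_ = ch.GetVerificationStatus() // VERIFICATION_STATUS_UNSPECIFIED until set by the service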
1307
vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
929
vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
generated
vendored
Normal file
@@ -0,0 +1,929 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/uptime.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import duration "github.com/golang/protobuf/ptypes/duration"
|
||||
import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The regions from which an uptime check can be run.
|
||||
type UptimeCheckRegion int32
|
||||
|
||||
const (
|
||||
// Default value if no region is specified. Will result in uptime checks
|
||||
// running from all regions.
|
||||
UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0
|
||||
// Allows checks to run from locations within the United States of America.
|
||||
UptimeCheckRegion_USA UptimeCheckRegion = 1
|
||||
// Allows checks to run from locations within the continent of Europe.
|
||||
UptimeCheckRegion_EUROPE UptimeCheckRegion = 2
|
||||
// Allows checks to run from locations within the continent of South
|
||||
// America.
|
||||
UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3
|
||||
// Allows checks to run from locations within the Asia Pacific area (ex:
|
||||
// Singapore).
|
||||
UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4
|
||||
)
|
||||
|
||||
var UptimeCheckRegion_name = map[int32]string{
|
||||
0: "REGION_UNSPECIFIED",
|
||||
1: "USA",
|
||||
2: "EUROPE",
|
||||
3: "SOUTH_AMERICA",
|
||||
4: "ASIA_PACIFIC",
|
||||
}
|
||||
var UptimeCheckRegion_value = map[string]int32{
|
||||
"REGION_UNSPECIFIED": 0,
|
||||
"USA": 1,
|
||||
"EUROPE": 2,
|
||||
"SOUTH_AMERICA": 3,
|
||||
"ASIA_PACIFIC": 4,
|
||||
}
|
||||
|
||||
func (x UptimeCheckRegion) String() string {
|
||||
return proto.EnumName(UptimeCheckRegion_name, int32(x))
|
||||
}
|
||||
func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0}
|
||||
}
|
||||
|
||||
// The supported resource types that can be used as values of
|
||||
// group_resource.resource_type. gae_app and uptime_url are not allowed
|
||||
// because group checks on App Engine modules and URLs are not allowed.
|
||||
type GroupResourceType int32
|
||||
|
||||
const (
|
||||
// Default value (not valid).
|
||||
GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0
|
||||
// A group of instances (could be either GCE or AWS_EC2).
|
||||
GroupResourceType_INSTANCE GroupResourceType = 1
|
||||
// A group of AWS load balancers.
|
||||
GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2
|
||||
)
|
||||
|
||||
var GroupResourceType_name = map[int32]string{
|
||||
0: "RESOURCE_TYPE_UNSPECIFIED",
|
||||
1: "INSTANCE",
|
||||
2: "AWS_ELB_LOAD_BALANCER",
|
||||
}
|
||||
var GroupResourceType_value = map[string]int32{
|
||||
"RESOURCE_TYPE_UNSPECIFIED": 0,
|
||||
"INSTANCE": 1,
|
||||
"AWS_ELB_LOAD_BALANCER": 2,
|
||||
}
|
||||
|
||||
func (x GroupResourceType) String() string {
|
||||
return proto.EnumName(GroupResourceType_name, int32(x))
|
||||
}
|
||||
func (GroupResourceType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{1}
|
||||
}
|
||||
|
||||
// This message configures which resources and services to monitor for
|
||||
// availability.
|
||||
type UptimeCheckConfig struct {
|
||||
// A unique resource name for this UptimeCheckConfig. The format is:
|
||||
//
|
||||
//
|
||||
// `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
|
||||
//
|
||||
// This field should be omitted when creating the uptime check configuration;
|
||||
// on create, the resource name is assigned by the server and included in the
|
||||
// response.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// A human-friendly name for the uptime check configuration. The display name
|
||||
// should be unique within a Stackdriver Account in order to make it easier
|
||||
// to identify; however, uniqueness is not enforced. Required.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// The resource the check is checking. Required.
|
||||
//
|
||||
// Types that are valid to be assigned to Resource:
|
||||
// *UptimeCheckConfig_MonitoredResource
|
||||
// *UptimeCheckConfig_ResourceGroup_
|
||||
Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"`
|
||||
// The type of uptime check request.
|
||||
//
|
||||
// Types that are valid to be assigned to CheckRequestType:
|
||||
// *UptimeCheckConfig_HttpCheck_
|
||||
// *UptimeCheckConfig_TcpCheck_
|
||||
CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"`
|
||||
// How often the uptime check is performed.
|
||||
// Currently, only 1, 5, 10, and 15 minutes are supported. Required.
|
||||
Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"`
|
||||
// The maximum amount of time to wait for the request to complete (must be
|
||||
// between 1 and 60 seconds). Required.
|
||||
Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||
// The expected content on the page the check is run against.
|
||||
// Currently, only the first entry in the list is supported, and other entries
|
||||
// will be ignored. The server will look for an exact match of the string in
|
||||
// the page response's content. This field is optional and should only be
|
||||
// specified if a content match is required.
|
||||
ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"`
|
||||
// The list of regions from which the check will be run.
|
||||
// If this field is specified, enough regions to include a minimum of
|
||||
// 3 locations must be provided, or an error message is returned.
|
||||
// Not specifying this field will result in uptime checks running from all
|
||||
// regions.
|
||||
SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"`
|
||||
// The internal checkers that this check will egress from.
|
||||
InternalCheckers []*UptimeCheckConfig_InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} }
|
||||
func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0}
|
||||
}
|
||||
func (m *UptimeCheckConfig) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig proto.InternalMessageInfo
|
||||
|
||||
type isUptimeCheckConfig_Resource interface {
|
||||
isUptimeCheckConfig_Resource()
|
||||
}
|
||||
type isUptimeCheckConfig_CheckRequestType interface {
|
||||
isUptimeCheckConfig_CheckRequestType()
|
||||
}
|
||||
|
||||
type UptimeCheckConfig_MonitoredResource struct {
|
||||
MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"`
|
||||
}
|
||||
type UptimeCheckConfig_ResourceGroup_ struct {
|
||||
ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"`
|
||||
}
|
||||
type UptimeCheckConfig_HttpCheck_ struct {
|
||||
HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"`
|
||||
}
|
||||
type UptimeCheckConfig_TcpCheck_ struct {
|
||||
TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {}
|
||||
func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {}
|
||||
func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {}
|
||||
func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {}
|
||||
|
||||
func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource {
|
||||
if m != nil {
|
||||
return m.Resource
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType {
|
||||
if m != nil {
|
||||
return m.CheckRequestType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetDisplayName() string {
|
||||
if m != nil {
|
||||
return m.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource {
|
||||
if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok {
|
||||
return x.MonitoredResource
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup {
|
||||
if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok {
|
||||
return x.ResourceGroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck {
|
||||
if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok {
|
||||
return x.HttpCheck
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck {
|
||||
if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok {
|
||||
return x.TcpCheck
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetPeriod() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.Period
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetTimeout() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.Timeout
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher {
|
||||
if m != nil {
|
||||
return m.ContentMatchers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion {
|
||||
if m != nil {
|
||||
return m.SelectedRegions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig) GetInternalCheckers() []*UptimeCheckConfig_InternalChecker {
|
||||
if m != nil {
|
||||
return m.InternalCheckers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*UptimeCheckConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _UptimeCheckConfig_OneofMarshaler, _UptimeCheckConfig_OneofUnmarshaler, _UptimeCheckConfig_OneofSizer, []interface{}{
|
||||
(*UptimeCheckConfig_MonitoredResource)(nil),
|
||||
(*UptimeCheckConfig_ResourceGroup_)(nil),
|
||||
(*UptimeCheckConfig_HttpCheck_)(nil),
|
||||
(*UptimeCheckConfig_TcpCheck_)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func _UptimeCheckConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||
m := msg.(*UptimeCheckConfig)
|
||||
// resource
|
||||
switch x := m.Resource.(type) {
|
||||
case *UptimeCheckConfig_MonitoredResource:
|
||||
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.MonitoredResource); err != nil {
|
||||
return err
|
||||
}
|
||||
case *UptimeCheckConfig_ResourceGroup_:
|
||||
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.ResourceGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("UptimeCheckConfig.Resource has unexpected type %T", x)
|
||||
}
|
||||
// check_request_type
|
||||
switch x := m.CheckRequestType.(type) {
|
||||
case *UptimeCheckConfig_HttpCheck_:
|
||||
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.HttpCheck); err != nil {
|
||||
return err
|
||||
}
|
||||
case *UptimeCheckConfig_TcpCheck_:
|
||||
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.TcpCheck); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("UptimeCheckConfig.CheckRequestType has unexpected type %T", x)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _UptimeCheckConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||
m := msg.(*UptimeCheckConfig)
|
||||
switch tag {
|
||||
case 3: // resource.monitored_resource
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(monitoredres.MonitoredResource)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Resource = &UptimeCheckConfig_MonitoredResource{msg}
|
||||
return true, err
|
||||
case 4: // resource.resource_group
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(UptimeCheckConfig_ResourceGroup)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Resource = &UptimeCheckConfig_ResourceGroup_{msg}
|
||||
return true, err
|
||||
case 5: // check_request_type.http_check
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(UptimeCheckConfig_HttpCheck)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.CheckRequestType = &UptimeCheckConfig_HttpCheck_{msg}
|
||||
return true, err
|
||||
case 6: // check_request_type.tcp_check
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(UptimeCheckConfig_TcpCheck)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.CheckRequestType = &UptimeCheckConfig_TcpCheck_{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func _UptimeCheckConfig_OneofSizer(msg proto.Message) (n int) {
|
||||
m := msg.(*UptimeCheckConfig)
|
||||
// resource
|
||||
switch x := m.Resource.(type) {
|
||||
case *UptimeCheckConfig_MonitoredResource:
|
||||
s := proto.Size(x.MonitoredResource)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *UptimeCheckConfig_ResourceGroup_:
|
||||
s := proto.Size(x.ResourceGroup)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
// check_request_type
|
||||
switch x := m.CheckRequestType.(type) {
|
||||
case *UptimeCheckConfig_HttpCheck_:
|
||||
s := proto.Size(x.HttpCheck)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *UptimeCheckConfig_TcpCheck_:
|
||||
s := proto.Size(x.TcpCheck)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
}
|
||||
return n
|
||||
}
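// Illustrative sketch (editorial addition, not generated code): populating the
// "resource" and "check_request_type" oneof fields goes through the generated
// wrapper structs rather than the concrete messages directly. All literal
// values below (display name, group ID, path, period) are hypothetical.
func exampleUptimeCheckConfigOneofs() *UptimeCheckConfig {
	return &UptimeCheckConfig{
		DisplayName: "example-check",
		// The Resource oneof is set via a wrapper type such as
		// UptimeCheckConfig_ResourceGroup_ or UptimeCheckConfig_MonitoredResource.
		Resource: &UptimeCheckConfig_ResourceGroup_{
			ResourceGroup: &UptimeCheckConfig_ResourceGroup{
				GroupId: "example-group-id", // group_id only, without the projects/.../groups/ prefix
			},
		},
		// The CheckRequestType oneof likewise wraps either an HttpCheck or a TcpCheck.
		CheckRequestType: &UptimeCheckConfig_HttpCheck_{
			HttpCheck: &UptimeCheckConfig_HttpCheck{
				UseSsl: true,
				Path:   "/healthz", // empty path defaults to "/"
				Port:   443,        // defaults to 443 when use_ssl is true
			},
		},
		Period: &duration.Duration{Seconds: 60}, // how often the check runs
	}
}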
|
||||
|
||||
// The resource submessage for group checks. It can be used instead of a
|
||||
// monitored resource, when multiple resources are being monitored.
|
||||
type UptimeCheckConfig_ResourceGroup struct {
|
||||
// The group of resources being monitored. Should be only the
|
||||
// group_id, not projects/<project_id>/groups/<group_id>.
|
||||
GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
// The resource type of the group members.
|
||||
ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} }
|
||||
func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 0}
|
||||
}
|
||||
func (m *UptimeCheckConfig_ResourceGroup) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_ResourceGroup) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ResourceGroup) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ResourceGroup) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_ResourceGroup.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_ResourceGroup proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
|
||||
if m != nil {
|
||||
return m.GroupId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
|
||||
if m != nil {
|
||||
return m.ResourceType
|
||||
}
|
||||
return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
|
||||
}
|
||||
|
||||
// Information involved in an HTTP/HTTPS uptime check request.
|
||||
type UptimeCheckConfig_HttpCheck struct {
|
||||
// If true, use HTTPS instead of HTTP to run the check.
|
||||
UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"`
|
||||
// The path to the page to run the check against. Will be combined with the
|
||||
// host (specified within the MonitoredResource) and port to construct the
|
||||
// full URL. Optional (defaults to "/").
|
||||
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
|
||||
// The port to the page to run the check against. Will be combined with host
|
||||
// (specified within the MonitoredResource) and path to construct the full
|
||||
// URL. Optional (defaults to 80 without SSL, or 443 with SSL).
|
||||
Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"`
|
||||
// The authentication information. Optional when creating an HTTP check;
|
||||
// defaults to empty.
|
||||
AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"`
|
||||
// Boolean specifying whether to encrypt the header information.
|
||||
// Encryption should be specified for any headers related to authentication
|
||||
// that you do not wish to be seen when retrieving the configuration. The
|
||||
// server will be responsible for encrypting the headers.
|
||||
// On Get/List calls, if mask_headers is set to True then the headers
|
||||
// will be obscured with ******.
|
||||
MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
|
||||
// The list of headers to send as part of the uptime check request.
|
||||
// If two headers have the same key and different values, they should
|
||||
// be entered as a single header, with the value being a comma-separated
|
||||
// list of all the desired values as described at
|
||||
// https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31).
|
||||
// Entering two separate headers with the same key in a Create call will
|
||||
// cause the first to be overwritten by the second.
|
||||
Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} }
|
||||
func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 1}
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_HttpCheck) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_HttpCheck.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_HttpCheck.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_HttpCheck proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool {
|
||||
if m != nil {
|
||||
return m.UseSsl
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetPath() string {
|
||||
if m != nil {
|
||||
return m.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 {
|
||||
if m != nil {
|
||||
return m.Port
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication {
|
||||
if m != nil {
|
||||
return m.AuthInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool {
|
||||
if m != nil {
|
||||
return m.MaskHeaders
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string {
|
||||
if m != nil {
|
||||
return m.Headers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A type of authentication to perform against the specified resource or URL
|
||||
// that uses username and password.
|
||||
// Currently, only Basic authentication is supported in Uptime Monitoring.
|
||||
type UptimeCheckConfig_HttpCheck_BasicAuthentication struct {
|
||||
// The username to authenticate.
|
||||
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
|
||||
// The password to authenticate.
|
||||
Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() {
|
||||
*m = UptimeCheckConfig_HttpCheck_BasicAuthentication{}
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string {
|
||||
return proto.CompactTextString(m)
|
||||
}
|
||||
func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 1, 0}
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string {
|
||||
if m != nil {
|
||||
return m.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string {
|
||||
if m != nil {
|
||||
return m.Password
|
||||
}
|
||||
return ""
|
||||
}
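// Illustrative sketch (editorial addition, not generated code): an HttpCheck
// carrying Basic authentication and a custom header. Credentials and header
// values are hypothetical; with MaskHeaders set, Get/List calls return the
// headers obscured with "******".
func exampleHTTPCheckWithBasicAuth() *UptimeCheckConfig_HttpCheck {
	return &UptimeCheckConfig_HttpCheck{
		UseSsl: true,
		Path:   "/login",
		Port:   443,
		AuthInfo: &UptimeCheckConfig_HttpCheck_BasicAuthentication{
			Username: "monitor",          // hypothetical username
			Password: "example-password", // hypothetical password; stored server-side
		},
		MaskHeaders: true,
		Headers: map[string]string{
			"X-Probe-Source": "uptime-check", // hypothetical header key/value
		},
	}
}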
|
||||
|
||||
// Information required for a TCP uptime check request.
|
||||
type UptimeCheckConfig_TcpCheck struct {
|
||||
// The port to the page to run the check against. Will be combined with host
|
||||
// (specified within the MonitoredResource) to construct the full URL.
|
||||
// Required.
|
||||
Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} }
|
||||
func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 2}
|
||||
}
|
||||
func (m *UptimeCheckConfig_TcpCheck) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_TcpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_TcpCheck) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_TcpCheck.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_TcpCheck) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_TcpCheck) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_TcpCheck.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_TcpCheck proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 {
|
||||
if m != nil {
|
||||
return m.Port
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Used to perform string matching. Currently, this matches on the exact
|
||||
// content. In the future, it can be expanded to allow for regular expressions
|
||||
// and more complex matching.
|
||||
type UptimeCheckConfig_ContentMatcher struct {
|
||||
// String content to match
|
||||
Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} }
|
||||
func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 3}
|
||||
}
|
||||
func (m *UptimeCheckConfig_ContentMatcher) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ContentMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_ContentMatcher) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ContentMatcher) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_ContentMatcher) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_ContentMatcher.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_ContentMatcher proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_ContentMatcher) GetContent() string {
|
||||
if m != nil {
|
||||
return m.Content
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Nimbus InternalCheckers.
|
||||
type UptimeCheckConfig_InternalChecker struct {
|
||||
// The GCP project ID. Not necessarily the same as the project_id for the config.
|
||||
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
|
||||
// The internal network to perform this uptime check on.
|
||||
Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"`
|
||||
// The GCP zone the uptime check should egress from. Only respected for
|
||||
// internal uptime checks, where internal_network is specified.
|
||||
GcpZone string `protobuf:"bytes,3,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"`
|
||||
// The checker ID.
|
||||
CheckerId string `protobuf:"bytes,4,opt,name=checker_id,json=checkerId,proto3" json:"checker_id,omitempty"`
|
||||
// The checker's human-readable name.
|
||||
DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) Reset() { *m = UptimeCheckConfig_InternalChecker{} }
|
||||
func (m *UptimeCheckConfig_InternalChecker) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckConfig_InternalChecker) ProtoMessage() {}
|
||||
func (*UptimeCheckConfig_InternalChecker) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{0, 4}
|
||||
}
|
||||
func (m *UptimeCheckConfig_InternalChecker) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckConfig_InternalChecker.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckConfig_InternalChecker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckConfig_InternalChecker.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckConfig_InternalChecker) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckConfig_InternalChecker.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckConfig_InternalChecker) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckConfig_InternalChecker.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckConfig_InternalChecker) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckConfig_InternalChecker.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckConfig_InternalChecker proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) GetProjectId() string {
|
||||
if m != nil {
|
||||
return m.ProjectId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) GetNetwork() string {
|
||||
if m != nil {
|
||||
return m.Network
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) GetGcpZone() string {
|
||||
if m != nil {
|
||||
return m.GcpZone
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) GetCheckerId() string {
|
||||
if m != nil {
|
||||
return m.CheckerId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckConfig_InternalChecker) GetDisplayName() string {
|
||||
if m != nil {
|
||||
return m.DisplayName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Contains the region, location, and list of IP
|
||||
// addresses where checkers in the location run from.
|
||||
type UptimeCheckIp struct {
|
||||
// A broad region category in which the IP address is located.
|
||||
Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"`
|
||||
// A more specific location within the region that typically encodes
|
||||
// a particular city/town/metro (and its containing state/province or country)
|
||||
// within the broader umbrella region category.
|
||||
Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
|
||||
// The IP address from which the uptime check originates. This is a full
|
||||
// IP address (not an IP address range). Most IP addresses, as of this
|
||||
// publication, are in IPv4 format; however, one should not rely on the
|
||||
// IP addresses being in IPv4 format indefinitely and should support
|
||||
// interpreting this field in either IPv4 or IPv6 format.
|
||||
IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} }
|
||||
func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) }
|
||||
func (*UptimeCheckIp) ProtoMessage() {}
|
||||
func (*UptimeCheckIp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_70741699aaca8abd, []int{1}
|
||||
}
|
||||
func (m *UptimeCheckIp) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UptimeCheckIp.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UptimeCheckIp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UptimeCheckIp.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UptimeCheckIp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UptimeCheckIp.Merge(dst, src)
|
||||
}
|
||||
func (m *UptimeCheckIp) XXX_Size() int {
|
||||
return xxx_messageInfo_UptimeCheckIp.Size(m)
|
||||
}
|
||||
func (m *UptimeCheckIp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UptimeCheckIp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UptimeCheckIp proto.InternalMessageInfo
|
||||
|
||||
func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion {
|
||||
if m != nil {
|
||||
return m.Region
|
||||
}
|
||||
return UptimeCheckRegion_REGION_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (m *UptimeCheckIp) GetLocation() string {
|
||||
if m != nil {
|
||||
return m.Location
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UptimeCheckIp) GetIpAddress() string {
|
||||
if m != nil {
|
||||
return m.IpAddress
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig")
|
||||
proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup")
|
||||
proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck")
|
||||
proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry")
|
||||
proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication")
|
||||
proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck")
|
||||
proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher")
|
||||
proto.RegisterType((*UptimeCheckConfig_InternalChecker)(nil), "google.monitoring.v3.UptimeCheckConfig.InternalChecker")
|
||||
proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp")
|
||||
proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value)
|
||||
proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor_uptime_70741699aaca8abd)
|
||||
}
|
||||
|
||||
var fileDescriptor_uptime_70741699aaca8abd = []byte{
|
||||
// 1021 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdd, 0x4e, 0xe3, 0x46,
|
||||
0x14, 0x5e, 0x13, 0xc8, 0xcf, 0x21, 0xb0, 0x66, 0x4a, 0xdb, 0x60, 0x89, 0x15, 0xbb, 0xbd, 0x28,
|
||||
0xe2, 0xc2, 0xe9, 0x12, 0xf5, 0x47, 0x5b, 0x69, 0x2b, 0x27, 0xb8, 0xc4, 0x12, 0x24, 0xd1, 0x84,
|
||||
0x6c, 0xdb, 0x2d, 0xaa, 0x65, 0xec, 0x21, 0x71, 0x49, 0x3c, 0xae, 0x67, 0xcc, 0x96, 0xbe, 0x42,
|
||||
0x1f, 0xa3, 0x17, 0x95, 0xfa, 0x04, 0x7d, 0x86, 0xbe, 0x4d, 0xdf, 0xa0, 0x9a, 0xf1, 0x4c, 0x20,
|
||||
0x40, 0xb5, 0x70, 0x37, 0xdf, 0xf9, 0xf9, 0xe6, 0x1c, 0x9f, 0x9f, 0x31, 0x3c, 0x1f, 0x53, 0x3a,
|
||||
0x9e, 0x92, 0xe6, 0x8c, 0x26, 0x31, 0xa7, 0x59, 0x9c, 0x8c, 0x9b, 0x97, 0xad, 0x66, 0x9e, 0xf2,
|
||||
0x78, 0x46, 0xec, 0x34, 0xa3, 0x9c, 0xa2, 0xcd, 0xc2, 0xc4, 0xbe, 0x36, 0xb1, 0x2f, 0x5b, 0xd6,
|
||||
0x27, 0xca, 0x31, 0x48, 0x63, 0xed, 0x4c, 0x22, 0x3f, 0x23, 0x8c, 0xe6, 0x59, 0xa8, 0x5c, 0xad,
|
||||
0x67, 0xca, 0x48, 0xa2, 0xb3, 0xfc, 0xbc, 0x19, 0xe5, 0x59, 0xc0, 0x63, 0x9a, 0x14, 0xfa, 0x17,
|
||||
0xff, 0xd6, 0x61, 0x63, 0x24, 0xef, 0xea, 0x4c, 0x48, 0x78, 0xd1, 0xa1, 0xc9, 0x79, 0x3c, 0x46,
|
||||
0x08, 0x96, 0x93, 0x60, 0x46, 0x1a, 0xc6, 0x8e, 0xb1, 0x5b, 0xc3, 0xf2, 0x8c, 0x9e, 0x43, 0x3d,
|
||||
0x8a, 0x59, 0x3a, 0x0d, 0xae, 0x7c, 0xa9, 0x5b, 0x92, 0xba, 0x55, 0x25, 0xeb, 0x09, 0x93, 0x1e,
|
||||
0xa0, 0xbb, 0x81, 0x34, 0x4a, 0x3b, 0xc6, 0xee, 0xea, 0xfe, 0xb6, 0xad, 0x92, 0x08, 0xd2, 0xd8,
|
||||
0x3e, 0xd6, 0x56, 0x58, 0x19, 0x75, 0x9f, 0xe0, 0x8d, 0xd9, 0x6d, 0x21, 0xfa, 0x09, 0xd6, 0x35,
|
||||
0x8b, 0x3f, 0xce, 0x68, 0x9e, 0x36, 0x96, 0x25, 0xd7, 0xe7, 0xf6, 0x7d, 0x1f, 0xc4, 0xbe, 0x93,
|
||||
0x87, 0xad, 0x99, 0x0e, 0x85, 0x73, 0xf7, 0x09, 0x5e, 0xcb, 0x6e, 0x0a, 0x10, 0x06, 0x98, 0x70,
|
||||
0x9e, 0xfa, 0xa1, 0x70, 0x69, 0xac, 0x48, 0xee, 0x97, 0x0f, 0xe5, 0xee, 0x72, 0x9e, 0x4a, 0xdc,
|
||||
0x35, 0x70, 0x6d, 0xa2, 0x01, 0xea, 0x43, 0x8d, 0x87, 0x9a, 0xb2, 0x2c, 0x29, 0x3f, 0x7b, 0x28,
|
||||
0xe5, 0x49, 0x38, 0x67, 0xac, 0x72, 0x75, 0x46, 0x2f, 0xa1, 0x9c, 0x92, 0x2c, 0xa6, 0x51, 0xa3,
|
||||
0x22, 0xd9, 0xb6, 0x34, 0x9b, 0x2e, 0xa9, 0x7d, 0xa0, 0x4a, 0x8a, 0x95, 0x21, 0x6a, 0x41, 0x45,
|
||||
0x50, 0xd3, 0x9c, 0x37, 0xaa, 0xef, 0xf3, 0xd1, 0x96, 0x28, 0x00, 0x33, 0xa4, 0x09, 0x27, 0x09,
|
||||
0xf7, 0x67, 0x01, 0x0f, 0x27, 0x24, 0x63, 0x8d, 0xda, 0x4e, 0x69, 0x77, 0x75, 0xff, 0x8b, 0x87,
|
||||
0xc6, 0xdf, 0x29, 0xfc, 0x8f, 0x0b, 0x77, 0xfc, 0x34, 0x5c, 0xc0, 0x0c, 0x61, 0x30, 0x19, 0x99,
|
||||
0x92, 0x90, 0xcb, 0xf6, 0x18, 0xc7, 0x34, 0x61, 0x0d, 0xd8, 0x29, 0xed, 0xae, 0xef, 0x7f, 0xfa,
|
||||
0xde, 0x2b, 0xb0, 0xb4, 0xc7, 0x4f, 0x35, 0x41, 0x81, 0x19, 0x8a, 0x60, 0x23, 0x4e, 0x38, 0xc9,
|
||||
0x92, 0x60, 0x5a, 0x7c, 0x74, 0x11, 0xf7, 0xba, 0x8c, 0xfb, 0xcb, 0x87, 0xc6, 0xed, 0x29, 0x82,
|
||||
0x4e, 0xe1, 0x8f, 0xcd, 0x78, 0x51, 0xc0, 0xac, 0x5f, 0x61, 0x6d, 0xa1, 0x97, 0xd0, 0x16, 0x54,
|
||||
0x65, 0x47, 0xfa, 0x71, 0xa4, 0xa6, 0xa4, 0x22, 0xb1, 0x17, 0xa1, 0x23, 0x98, 0xb7, 0x99, 0xcf,
|
||||
0xaf, 0xd2, 0x62, 0x52, 0xfe, 0x37, 0x45, 0x49, 0xa7, 0xb9, 0x4f, 0xae, 0x52, 0x82, 0xeb, 0xd9,
|
||||
0x0d, 0x64, 0xfd, 0x5d, 0x82, 0xda, 0xbc, 0xd5, 0xd0, 0xc7, 0x50, 0xc9, 0x19, 0xf1, 0x19, 0x9b,
|
||||
0xca, 0x5b, 0xab, 0xb8, 0x9c, 0x33, 0x32, 0x64, 0x53, 0x31, 0xb1, 0x69, 0xc0, 0x27, 0x6a, 0x2a,
|
||||
0xe5, 0x59, 0xca, 0x68, 0xc6, 0xe5, 0x00, 0xae, 0x60, 0x79, 0x46, 0x67, 0x50, 0x0b, 0x72, 0x3e,
|
||||
0xf1, 0xe3, 0xe4, 0x9c, 0xaa, 0x69, 0x72, 0x1f, 0xdd, 0xf1, 0x76, 0x3b, 0x60, 0x71, 0xe8, 0xe4,
|
||||
0x7c, 0x42, 0x12, 0x1e, 0x87, 0x45, 0x23, 0x55, 0x05, 0xaf, 0x97, 0x9c, 0x53, 0xb1, 0x29, 0x66,
|
||||
0x01, 0xbb, 0xf0, 0x27, 0x24, 0x88, 0x44, 0x35, 0x56, 0x64, 0xa4, 0xab, 0x42, 0xd6, 0x2d, 0x44,
|
||||
0xe8, 0x7b, 0xa8, 0x68, 0x6d, 0x59, 0xd6, 0xea, 0xf5, 0xe3, 0x83, 0x50, 0x5c, 0x6e, 0xc2, 0xb3,
|
||||
0x2b, 0xac, 0xe9, 0xac, 0x63, 0xf8, 0xe0, 0x9e, 0xe8, 0x90, 0x05, 0xd5, 0x9c, 0x89, 0x9a, 0xce,
|
||||
0xb7, 0xda, 0x1c, 0x0b, 0x5d, 0x1a, 0x30, 0xf6, 0x8e, 0x66, 0x91, 0xfa, 0x7e, 0x73, 0x6c, 0xbd,
|
||||
0x82, 0xfa, 0xcd, 0x7b, 0x90, 0x09, 0xa5, 0x0b, 0x72, 0xa5, 0x28, 0xc4, 0x11, 0x6d, 0xc2, 0xca,
|
||||
0x65, 0x30, 0xcd, 0xf5, 0x42, 0x2c, 0xc0, 0xab, 0xa5, 0xaf, 0x0c, 0xeb, 0x19, 0x54, 0xf5, 0x44,
|
||||
0xcf, 0x6b, 0x61, 0x5c, 0xd7, 0xc2, 0xda, 0x83, 0xf5, 0xc5, 0x89, 0x41, 0x0d, 0xa8, 0xa8, 0x99,
|
||||
0xd1, 0x4d, 0xa5, 0xa0, 0xf5, 0xa7, 0x01, 0x4f, 0x6f, 0xb5, 0x29, 0xda, 0x06, 0x48, 0x33, 0xfa,
|
||||
0x33, 0x09, 0xf9, 0x75, 0x17, 0xd6, 0x94, 0xc4, 0x8b, 0x04, 0x59, 0x42, 0xf8, 0x3b, 0x9a, 0x5d,
|
||||
0xa8, 0xd0, 0x34, 0x94, 0xcd, 0x1b, 0xa6, 0xfe, 0x6f, 0x34, 0x29, 0xb6, 0xb3, 0x68, 0xde, 0x30,
|
||||
0x7d, 0x4b, 0x13, 0x22, 0x38, 0xd5, 0x14, 0x09, 0xce, 0xe5, 0x82, 0x53, 0x49, 0xbc, 0xe8, 0xce,
|
||||
0x23, 0xb0, 0x72, 0xe7, 0x11, 0x68, 0x03, 0x54, 0x75, 0x03, 0xb7, 0x37, 0x01, 0x49, 0x5f, 0x3f,
|
||||
0x23, 0xbf, 0xe4, 0x84, 0x71, 0x39, 0x0f, 0x2f, 0x7e, 0x37, 0x60, 0xed, 0x46, 0x61, 0xbd, 0x14,
|
||||
0x7d, 0x03, 0xe5, 0x62, 0x1f, 0xc8, 0x2c, 0x1e, 0xb1, 0x0e, 0x94, 0x9b, 0x28, 0xe1, 0x94, 0x16,
|
||||
0xa5, 0xd6, 0x25, 0xd4, 0x58, 0xa4, 0x14, 0xa7, 0x7e, 0x10, 0x45, 0x19, 0x61, 0x4c, 0xe5, 0x5b,
|
||||
0x8b, 0x53, 0xa7, 0x10, 0xec, 0x91, 0x85, 0x07, 0xb0, 0xe0, 0x45, 0x1f, 0x01, 0xc2, 0xee, 0xa1,
|
||||
0xd7, 0xef, 0xf9, 0xa3, 0xde, 0x70, 0xe0, 0x76, 0xbc, 0x6f, 0x3d, 0xf7, 0xc0, 0x7c, 0x82, 0x2a,
|
||||
0x50, 0x1a, 0x0d, 0x1d, 0xd3, 0x40, 0x00, 0x65, 0x77, 0x84, 0xfb, 0x03, 0xd7, 0x5c, 0x42, 0x1b,
|
||||
0xb0, 0x36, 0xec, 0x8f, 0x4e, 0xba, 0xbe, 0x73, 0xec, 0x62, 0xaf, 0xe3, 0x98, 0x25, 0x64, 0x42,
|
||||
0xdd, 0x19, 0x7a, 0x8e, 0x3f, 0x70, 0x84, 0x6b, 0xc7, 0x5c, 0xde, 0xfb, 0x11, 0x36, 0xee, 0x8c,
|
||||
0x3a, 0xda, 0x86, 0x2d, 0xec, 0x0e, 0xfb, 0x23, 0xdc, 0x71, 0xfd, 0x93, 0x1f, 0x06, 0xee, 0xad,
|
||||
0xdb, 0xea, 0x50, 0xf5, 0x7a, 0xc3, 0x13, 0xa7, 0xd7, 0x71, 0x4d, 0x03, 0x6d, 0xc1, 0x87, 0xce,
|
||||
0x77, 0x43, 0xdf, 0x3d, 0x6a, 0xfb, 0x47, 0x7d, 0xe7, 0xc0, 0x6f, 0x3b, 0x47, 0x42, 0x83, 0xcd,
|
||||
0xa5, 0xf6, 0x1f, 0x06, 0x34, 0x42, 0x3a, 0xbb, 0xf7, 0xab, 0xb5, 0x57, 0x8b, 0xf4, 0x06, 0x62,
|
||||
0xf5, 0x0f, 0x8c, 0xb7, 0xaf, 0x95, 0xd1, 0x98, 0x4e, 0x83, 0x64, 0x6c, 0xd3, 0x6c, 0xdc, 0x1c,
|
||||
0x93, 0x44, 0x3e, 0x0c, 0xcd, 0x42, 0x15, 0xa4, 0x31, 0x5b, 0xfc, 0x1d, 0xf9, 0xfa, 0x1a, 0xfd,
|
||||
0xb5, 0x64, 0x1d, 0x16, 0x04, 0x9d, 0x29, 0xcd, 0x23, 0xfd, 0x94, 0x8b, 0xbb, 0xde, 0xb4, 0xfe,
|
||||
0xd1, 0xca, 0x53, 0xa9, 0x3c, 0xbd, 0x56, 0x9e, 0xbe, 0x69, 0x9d, 0x95, 0xe5, 0x25, 0xad, 0xff,
|
||||
0x02, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xa5, 0xbc, 0x87, 0xf2, 0x08, 0x00, 0x00,
|
||||
}
|
775
vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,775 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/monitoring/v3/uptime_service.proto
|
||||
|
||||
package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import empty "github.com/golang/protobuf/ptypes/empty"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import field_mask "google.golang.org/genproto/protobuf/field_mask"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The protocol for the `ListUptimeCheckConfigs` request.
|
||||
type ListUptimeCheckConfigsRequest struct {
|
||||
// The project whose uptime check configurations are listed. The format is
|
||||
//
|
||||
// `projects/[PROJECT_ID]`.
|
||||
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
|
||||
// The maximum number of results to return in a single response. The server
|
||||
// may further constrain the maximum number of results returned in a single
|
||||
// page. If the page_size is <=0, the server will decide the number of results
|
||||
// to be returned.
|
||||
PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// If this field is not empty then it must contain the `nextPageToken` value
|
||||
// returned by a previous call to this method. Using this field causes the
|
||||
// method to return more results from the previous method call.
|
||||
PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} }
|
||||
func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListUptimeCheckConfigsRequest) ProtoMessage() {}
|
||||
func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{0}
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListUptimeCheckConfigsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListUptimeCheckConfigsRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsRequest.Size(m)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListUptimeCheckConfigsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListUptimeCheckConfigsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListUptimeCheckConfigsRequest) GetParent() string {
|
||||
if m != nil {
|
||||
return m.Parent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 {
|
||||
if m != nil {
|
||||
return m.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckConfigsRequest) GetPageToken() string {
|
||||
if m != nil {
|
||||
return m.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListUptimeCheckConfigs` response.
|
||||
type ListUptimeCheckConfigsResponse struct {
|
||||
// The returned uptime check configurations.
|
||||
UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"`
|
||||
// This field represents the pagination token to retrieve the next page of
|
||||
// results. If the value is empty, it means no further results for the
|
||||
// request. To retrieve the next page of results, the value of the
|
||||
// next_page_token is passed to the subsequent List method call (in the
|
||||
// request message's page_token field).
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} }
|
||||
func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListUptimeCheckConfigsResponse) ProtoMessage() {}
|
||||
func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{1}
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListUptimeCheckConfigsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListUptimeCheckConfigsResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListUptimeCheckConfigsResponse.Size(m)
|
||||
}
|
||||
func (m *ListUptimeCheckConfigsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListUptimeCheckConfigsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListUptimeCheckConfigsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig {
|
||||
if m != nil {
|
||||
return m.UptimeCheckConfigs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string {
|
||||
if m != nil {
|
||||
return m.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `GetUptimeCheckConfig` request.
|
||||
type GetUptimeCheckConfigRequest struct {
|
||||
// The uptime check configuration to retrieve. The format is
|
||||
//
|
||||
// `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} }
|
||||
func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetUptimeCheckConfigRequest) ProtoMessage() {}
|
||||
func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{2}
|
||||
}
|
||||
func (m *GetUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GetUptimeCheckConfigRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GetUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GetUptimeCheckConfigRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *GetUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GetUptimeCheckConfigRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *GetUptimeCheckConfigRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_GetUptimeCheckConfigRequest.Size(m)
|
||||
}
|
||||
func (m *GetUptimeCheckConfigRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GetUptimeCheckConfigRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GetUptimeCheckConfigRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *GetUptimeCheckConfigRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `CreateUptimeCheckConfig` request.
|
||||
type CreateUptimeCheckConfigRequest struct {
|
||||
// The project in which to create the uptime check. The format is:
|
||||
//
|
||||
// `projects/[PROJECT_ID]`.
|
||||
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
|
||||
// The new uptime check configuration.
|
||||
UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} }
|
||||
func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
|
||||
func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{3}
|
||||
}
|
||||
func (m *CreateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CreateUptimeCheckConfigRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CreateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CreateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *CreateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CreateUptimeCheckConfigRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *CreateUptimeCheckConfigRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_CreateUptimeCheckConfigRequest.Size(m)
|
||||
}
|
||||
func (m *CreateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CreateUptimeCheckConfigRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CreateUptimeCheckConfigRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *CreateUptimeCheckConfigRequest) GetParent() string {
|
||||
if m != nil {
|
||||
return m.Parent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
|
||||
if m != nil {
|
||||
return m.UptimeCheckConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The protocol for the `UpdateUptimeCheckConfig` request.
|
||||
type UpdateUptimeCheckConfigRequest struct {
|
||||
// Optional. If present, only the listed fields in the current uptime check
|
||||
// configuration are updated with values from the new configuration. If this
|
||||
// field is empty, then the current configuration is completely replaced with
|
||||
// the new configuration.
|
||||
UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
|
||||
// Required. If an `"updateMask"` has been specified, this field gives
|
||||
// the values for the set of fields mentioned in the `"updateMask"`. If an
|
||||
// `"updateMask"` has not been given, this uptime check configuration replaces
|
||||
// the current configuration. If a field is mentioned in `"updateMask"` but
|
||||
// the corresponding field is omitted in this partial uptime check
|
||||
// configuration, it has the effect of deleting/clearing the field from the
|
||||
// configuration on the server.
|
||||
UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} }
|
||||
func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {}
|
||||
func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{4}
|
||||
}
|
||||
func (m *UpdateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *UpdateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *UpdateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_UpdateUptimeCheckConfigRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *UpdateUptimeCheckConfigRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Size(m)
|
||||
}
|
||||
func (m *UpdateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_UpdateUptimeCheckConfigRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_UpdateUptimeCheckConfigRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask {
|
||||
if m != nil {
|
||||
return m.UpdateMask
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
|
||||
if m != nil {
|
||||
return m.UptimeCheckConfig
|
||||
}
|
||||
return nil
|
||||
}
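// Illustrative sketch (editorial addition, not generated code): a partial update
// that touches only display_name and http_check, leaving every other stored field
// unchanged. The mask paths, project, and check ID are hypothetical.
func exampleUpdateUptimeCheckConfigRequest() *UpdateUptimeCheckConfigRequest {
	return &UpdateUptimeCheckConfigRequest{
		UpdateMask: &field_mask.FieldMask{
			Paths: []string{"display_name", "http_check"},
		},
		UptimeCheckConfig: &UptimeCheckConfig{
			Name:        "projects/example-project/uptimeCheckConfigs/example-check",
			DisplayName: "renamed check",
			CheckRequestType: &UptimeCheckConfig_HttpCheck_{
				HttpCheck: &UptimeCheckConfig_HttpCheck{UseSsl: true, Path: "/healthz", Port: 443},
			},
		},
	}
}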
|
||||
|
||||
// The protocol for the `DeleteUptimeCheckConfig` request.
|
||||
type DeleteUptimeCheckConfigRequest struct {
|
||||
// The uptime check configuration to delete. The format is
|
||||
//
|
||||
// `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} }
|
||||
func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {}
|
||||
func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{5}
|
||||
}
|
||||
func (m *DeleteUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *DeleteUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *DeleteUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DeleteUptimeCheckConfigRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *DeleteUptimeCheckConfigRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Size(m)
|
||||
}
|
||||
func (m *DeleteUptimeCheckConfigRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DeleteUptimeCheckConfigRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DeleteUptimeCheckConfigRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *DeleteUptimeCheckConfigRequest) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListUptimeCheckIps` request.
|
||||
type ListUptimeCheckIpsRequest struct {
|
||||
// The maximum number of results to return in a single response. The server
|
||||
// may further constrain the maximum number of results returned in a single
|
||||
// page. If the page_size is <=0, the server will decide the number of results
|
||||
// to be returned.
|
||||
// NOTE: this field is not yet implemented
|
||||
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
// If this field is not empty then it must contain the `nextPageToken` value
|
||||
// returned by a previous call to this method. Using this field causes the
|
||||
// method to return more results from the previous method call.
|
||||
// NOTE: this field is not yet implemented
|
||||
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} }
|
||||
func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListUptimeCheckIpsRequest) ProtoMessage() {}
|
||||
func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{6}
|
||||
}
|
||||
func (m *ListUptimeCheckIpsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListUptimeCheckIpsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListUptimeCheckIpsRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsRequest.Size(m)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListUptimeCheckIpsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListUptimeCheckIpsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 {
|
||||
if m != nil {
|
||||
return m.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckIpsRequest) GetPageToken() string {
|
||||
if m != nil {
|
||||
return m.PageToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The protocol for the `ListUptimeCheckIps` response.
|
||||
type ListUptimeCheckIpsResponse struct {
|
||||
// The returned list of IP addresses (including region and location) that the
|
||||
// checkers run from.
|
||||
UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"`
|
||||
// This field represents the pagination token to retrieve the next page of
|
||||
// results. If the value is empty, it means no further results for the
|
||||
// request. To retrieve the next page of results, the value of the
|
||||
// next_page_token is passed to the subsequent List method call (in the
|
||||
// request message's page_token field).
|
||||
// NOTE: this field is not yet implemented
|
||||
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} }
|
||||
func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListUptimeCheckIpsResponse) ProtoMessage() {}
|
||||
func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_uptime_service_3d7db44c876ec85d, []int{7}
|
||||
}
|
||||
func (m *ListUptimeCheckIpsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ListUptimeCheckIpsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListUptimeCheckIpsResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListUptimeCheckIpsResponse.Size(m)
|
||||
}
|
||||
func (m *ListUptimeCheckIpsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListUptimeCheckIpsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListUptimeCheckIpsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp {
|
||||
if m != nil {
|
||||
return m.UptimeCheckIps
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string {
|
||||
if m != nil {
|
||||
return m.NextPageToken
|
||||
}
|
||||
return ""
|
||||
}
func init() {
	proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest")
	proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse")
	proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest")
	proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest")
	proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest")
	proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest")
	proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest")
	proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse")
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// UptimeCheckServiceClient is the client API for UptimeCheckService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type UptimeCheckServiceClient interface {
	// Lists the existing valid uptime check configurations for the project,
	// leaving out any invalid configurations.
	ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error)
	// Gets a single uptime check configuration.
	GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
	// Creates a new uptime check configuration.
	CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
	// Updates an uptime check configuration. You can either replace the entire
	// configuration with a new one or replace only certain fields in the current
	// configuration by specifying the fields to be updated via `"updateMask"`.
	// Returns the updated configuration.
	UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
	// Deletes an uptime check configuration. Note that this method will fail
	// if the uptime check configuration is referenced by an alert policy or
	// other dependent configs that would be rendered invalid by the deletion.
	DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Returns the list of IPs that checkers run from
	ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error)
}
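// Illustrative sketch, not generated code: one way a caller might construct
// this client and issue a simple request. The endpoint, the creds and ctx
// variables, and the error handling below are placeholders; real callers
// typically go through the cloud.google.com/go/monitoring wrapper with proper
// authentication rather than a bare grpc.Dial. See also the pagination sketch
// above ListUptimeCheckIpsResponse.
//
//	conn, err := grpc.Dial("monitoring.googleapis.com:443", grpc.WithTransportCredentials(creds))
//	if err != nil {
//		// handle err
//	}
//	defer conn.Close()
//	client := NewUptimeCheckServiceClient(conn)
//	resp, err := client.ListUptimeCheckIps(ctx, &ListUptimeCheckIpsRequest{})
//	if err != nil {
//		// handle err
//	}
//	for _, ip := range resp.GetUptimeCheckIps() {
//		_ = ip // each ip is an *UptimeCheckIp describing one checker address and its region
//	}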
type uptimeCheckServiceClient struct {
	cc *grpc.ClientConn
}

func NewUptimeCheckServiceClient(cc *grpc.ClientConn) UptimeCheckServiceClient {
	return &uptimeCheckServiceClient{cc}
}

func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) {
	out := new(ListUptimeCheckConfigsResponse)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
	out := new(UptimeCheckConfig)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
	out := new(UptimeCheckConfig)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
	out := new(UptimeCheckConfig)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	out := new(empty.Empty)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) {
	out := new(ListUptimeCheckIpsResponse)
	err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// UptimeCheckServiceServer is the server API for UptimeCheckService service.
type UptimeCheckServiceServer interface {
	// Lists the existing valid uptime check configurations for the project,
	// leaving out any invalid configurations.
	ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error)
	// Gets a single uptime check configuration.
	GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
	// Creates a new uptime check configuration.
	CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
	// Updates an uptime check configuration. You can either replace the entire
	// configuration with a new one or replace only certain fields in the current
	// configuration by specifying the fields to be updated via `"updateMask"`.
	// Returns the updated configuration.
	UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
	// Deletes an uptime check configuration. Note that this method will fail
	// if the uptime check configuration is referenced by an alert policy or
	// other dependent configs that would be rendered invalid by the deletion.
	DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error)
	// Returns the list of IPs that checkers run from
	ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error)
}
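// Illustrative sketch, not generated code: a minimal stub implementation of
// UptimeCheckServiceServer and how it would be registered. The type name and
// serve helper are hypothetical, and the sketch assumes the caller imports
// net, google.golang.org/grpc/codes, and google.golang.org/grpc/status.
//
//	type stubUptimeCheckServiceServer struct{}
//
//	func (stubUptimeCheckServiceServer) ListUptimeCheckConfigs(ctx context.Context, req *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) {
//		return nil, status.Error(codes.Unimplemented, "ListUptimeCheckConfigs not implemented")
//	}
//
//	// ...the remaining five interface methods follow the same pattern...
//
//	func serve(lis net.Listener) error {
//		s := grpc.NewServer()
//		RegisterUptimeCheckServiceServer(s, stubUptimeCheckServiceServer{})
//		return s.Serve(lis)
//	}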
func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) {
	s.RegisterService(&_UptimeCheckService_serviceDesc, srv)
}

func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListUptimeCheckConfigsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetUptimeCheckConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateUptimeCheckConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateUptimeCheckConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteUptimeCheckConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListUptimeCheckIpsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest))
	}
	return interceptor(ctx, in, info, handler)
}

var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.monitoring.v3.UptimeCheckService",
	HandlerType: (*UptimeCheckServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ListUptimeCheckConfigs",
			Handler:    _UptimeCheckService_ListUptimeCheckConfigs_Handler,
		},
		{
			MethodName: "GetUptimeCheckConfig",
			Handler:    _UptimeCheckService_GetUptimeCheckConfig_Handler,
		},
		{
			MethodName: "CreateUptimeCheckConfig",
			Handler:    _UptimeCheckService_CreateUptimeCheckConfig_Handler,
		},
		{
			MethodName: "UpdateUptimeCheckConfig",
			Handler:    _UptimeCheckService_UpdateUptimeCheckConfig_Handler,
		},
		{
			MethodName: "DeleteUptimeCheckConfig",
			Handler:    _UptimeCheckService_DeleteUptimeCheckConfig_Handler,
		},
		{
			MethodName: "ListUptimeCheckIps",
			Handler:    _UptimeCheckService_ListUptimeCheckIps_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "google/monitoring/v3/uptime_service.proto",
}

func init() {
	proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor_uptime_service_3d7db44c876ec85d)
}

var fileDescriptor_uptime_service_3d7db44c876ec85d = []byte{
// 735 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x4e, 0x13, 0x4f,
0x14, 0xce, 0xb4, 0xfc, 0x08, 0x1c, 0xf2, 0xf3, 0xcf, 0xd8, 0x40, 0x5d, 0xa4, 0xa9, 0x35, 0x51,
0x6c, 0xcc, 0xae, 0xb4, 0x5c, 0x49, 0x24, 0x91, 0xaa, 0x84, 0x44, 0x12, 0x52, 0x04, 0xa2, 0x92,
0x34, 0x4b, 0x19, 0xd6, 0xb5, 0xed, 0xce, 0xd8, 0x99, 0x25, 0x8a, 0xe1, 0xc6, 0x37, 0x30, 0x5c,
0x7a, 0x69, 0xe2, 0x05, 0x0f, 0xa0, 0xd7, 0x5e, 0x99, 0x78, 0x6b, 0x7c, 0x03, 0x1f, 0xc4, 0xec,
0xec, 0x2c, 0xa5, 0xed, 0xec, 0xba, 0x8d, 0x77, 0xdd, 0x39, 0x67, 0xce, 0xf9, 0xce, 0xb7, 0xdf,
0xf9, 0xba, 0x70, 0xdb, 0xa1, 0xd4, 0x69, 0x13, 0xab, 0x43, 0x3d, 0x57, 0xd0, 0xae, 0xeb, 0x39,
0xd6, 0x61, 0xd5, 0xf2, 0x99, 0x70, 0x3b, 0xa4, 0xc1, 0x49, 0xf7, 0xd0, 0x6d, 0x12, 0x93, 0x75,
0xa9, 0xa0, 0x38, 0x17, 0xa6, 0x9a, 0xbd, 0x54, 0xf3, 0xb0, 0x6a, 0x5c, 0x53, 0x05, 0x6c, 0xe6,
0x5a, 0xb6, 0xe7, 0x51, 0x61, 0x0b, 0x97, 0x7a, 0x3c, 0xbc, 0x63, 0x5c, 0x4f, 0x28, 0xaf, 0x52,
0x66, 0x55, 0x8a, 0x7c, 0xda, 0xf3, 0x0f, 0x2c, 0xd2, 0x61, 0xe2, 0xad, 0x0a, 0x16, 0x07, 0x83,
0x07, 0x2e, 0x69, 0xef, 0x37, 0x3a, 0x36, 0x6f, 0x85, 0x19, 0x25, 0x0e, 0x73, 0x4f, 0x5c, 0x2e,
0xb6, 0x64, 0xc9, 0xda, 0x4b, 0xd2, 0x6c, 0xd5, 0xa8, 0x77, 0xe0, 0x3a, 0xbc, 0x4e, 0x5e, 0xfb,
0x84, 0x0b, 0x3c, 0x0d, 0xe3, 0xcc, 0xee, 0x12, 0x4f, 0xe4, 0x51, 0x11, 0xcd, 0x4f, 0xd6, 0xd5,
0x13, 0x9e, 0x85, 0x49, 0x66, 0x3b, 0xa4, 0xc1, 0xdd, 0x23, 0x92, 0xcf, 0x16, 0xd1, 0xfc, 0x7f,
0xf5, 0x89, 0xe0, 0x60, 0xd3, 0x3d, 0x22, 0x78, 0x0e, 0x40, 0x06, 0x05, 0x6d, 0x11, 0x2f, 0x3f,
0x26, 0x2f, 0xca, 0xf4, 0xa7, 0xc1, 0x41, 0xe9, 0x13, 0x82, 0x42, 0x5c, 0x57, 0xce, 0xa8, 0xc7,
0x09, 0x7e, 0x06, 0x39, 0xc5, 0x62, 0x33, 0x08, 0x37, 0x9a, 0x61, 0x3c, 0x8f, 0x8a, 0xd9, 0xf9,
0xa9, 0xca, 0x2d, 0x53, 0x47, 0xa6, 0x39, 0x54, 0xaf, 0x8e, 0xfd, 0xa1, 0x16, 0xf8, 0x26, 0x5c,
0xf4, 0xc8, 0x1b, 0xd1, 0x38, 0x87, 0x30, 0x23, 0x11, 0xfe, 0x1f, 0x1c, 0x6f, 0x9c, 0xa1, 0x5c,
0x80, 0xd9, 0x55, 0x32, 0x8c, 0x31, 0x22, 0x06, 0xc3, 0x98, 0x67, 0x77, 0x88, 0xa2, 0x45, 0xfe,
0x2e, 0x7d, 0x40, 0x50, 0xa8, 0x75, 0x89, 0x2d, 0x48, 0xec, 0xb5, 0x38, 0x3e, 0x77, 0xe0, 0x8a,
0x66, 0x60, 0x89, 0x6c, 0x84, 0x79, 0x2f, 0x0f, 0xcd, 0x5b, 0xfa, 0x82, 0xa0, 0xb0, 0xc5, 0xf6,
0x93, 0x30, 0x2d, 0xc1, 0x94, 0x2f, 0x33, 0xa4, 0x32, 0x54, 0x4f, 0x23, 0xea, 0x19, 0x89, 0xc7,
0x7c, 0x1c, 0x88, 0x67, 0xdd, 0xe6, 0xad, 0x3a, 0x84, 0xe9, 0xc1, 0xef, 0x38, 0xe0, 0xd9, 0x7f,
0x06, 0xbe, 0x08, 0x85, 0x87, 0xa4, 0x4d, 0x12, 0x70, 0xeb, 0x5e, 0xc1, 0x0e, 0x5c, 0x1d, 0x90,
0xd6, 0x1a, 0x3b, 0x13, 0x73, 0x9f, 0x68, 0x33, 0x89, 0xa2, 0xcd, 0x0e, 0x8a, 0xf6, 0x04, 0x81,
0xa1, 0xab, 0xac, 0x04, 0xbb, 0x0e, 0x97, 0xfa, 0x68, 0x70, 0x59, 0x24, 0xd6, 0x1b, 0x7f, 0xe5,
0x60, 0x8d, 0xd5, 0x2f, 0xf8, 0x7d, 0x65, 0xd3, 0x8a, 0xb4, 0xf2, 0x7d, 0x02, 0xf0, 0xb9, 0x4a,
0x9b, 0xa1, 0xe5, 0xe0, 0xaf, 0x08, 0xa6, 0xf5, 0x1b, 0x86, 0xab, 0x7a, 0x38, 0x89, 0x2e, 0x60,
0x2c, 0x8e, 0x76, 0x29, 0xe4, 0xa4, 0x54, 0x79, 0xff, 0xf3, 0xf7, 0x49, 0xe6, 0x0e, 0x2e, 0x07,
0xae, 0xf5, 0x2e, 0x14, 0xfa, 0x7d, 0xd6, 0xa5, 0xaf, 0x48, 0x53, 0x70, 0xab, 0x7c, 0x6c, 0x69,
0xb6, 0xf3, 0x33, 0x82, 0x9c, 0x6e, 0xed, 0xf0, 0x82, 0x1e, 0x42, 0xc2, 0x8a, 0x1a, 0x69, 0xd5,
0x37, 0x00, 0x34, 0xd0, 0xd1, 0x39, 0x98, 0x1a, 0x94, 0x56, 0xf9, 0x18, 0x7f, 0x43, 0x30, 0x13,
0xb3, 0xeb, 0x38, 0x86, 0xae, 0x64, 0x6b, 0x48, 0x0f, 0x77, 0x55, 0xc2, 0x7d, 0x50, 0x1a, 0x81,
0xd7, 0x7b, 0xba, 0x25, 0xc5, 0xbf, 0x10, 0xcc, 0xc4, 0x78, 0x43, 0xdc, 0x0c, 0xc9, 0x56, 0x92,
0x7e, 0x86, 0x17, 0x72, 0x86, 0xad, 0xca, 0xb2, 0x9c, 0x41, 0x03, 0xce, 0x4c, 0xf5, 0x1a, 0xf4,
0x73, 0x7d, 0x44, 0x30, 0x13, 0xe3, 0x1d, 0x71, 0x73, 0x25, 0x5b, 0x8d, 0x31, 0x3d, 0xe4, 0x86,
0x8f, 0x82, 0xff, 0xd9, 0x48, 0x39, 0xe5, 0x51, 0x94, 0x73, 0x82, 0x00, 0x0f, 0x3b, 0x09, 0xb6,
0x52, 0xed, 0x58, 0xcf, 0xcd, 0x8c, 0xbb, 0xe9, 0x2f, 0xa8, 0x85, 0x34, 0x24, 0xda, 0x1c, 0xc6,
0xbd, 0xcf, 0x88, 0x28, 0x67, 0xe5, 0x14, 0x41, 0xbe, 0x49, 0x3b, 0xda, 0x9a, 0x2b, 0xca, 0x63,
0x94, 0xbd, 0x6c, 0x04, 0x1c, 0x6c, 0xa0, 0xe7, 0xcb, 0x2a, 0xd7, 0xa1, 0x6d, 0xdb, 0x73, 0x4c,
0xda, 0x75, 0x2c, 0x87, 0x78, 0x92, 0x21, 0x2b, 0x0c, 0xd9, 0xcc, 0xe5, 0xfd, 0x5f, 0x2f, 0x4b,
0xbd, 0xa7, 0xd3, 0x8c, 0xb1, 0x1a, 0x16, 0xa8, 0xb5, 0xa9, 0xbf, 0x6f, 0xae, 0xf7, 0x5a, 0x6e,
0x57, 0x7f, 0x44, 0xc1, 0x5d, 0x19, 0xdc, 0xed, 0x05, 0x77, 0xb7, 0xab, 0x7b, 0xe3, 0xb2, 0x49,
0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x1d, 0x15, 0x69, 0x80, 0x09, 0x00, 0x00,
}