diff --git a/Gopkg.lock b/Gopkg.lock index b4f550ab72..ab494ad499 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,14 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + digest = "1:57d862d83c6c53d8d645827878df9667ac3d1ac7f22717190a698110dfb7aabe" + name = "code.cloudfoundry.org/clock" + packages = ["."] + pruneopts = "UT" + revision = "02e53af36e6c978af692887ed449b74026d76fec" + version = "1.0.0" + [[projects]] digest = "1:f9ae348e1f793dcf9ed930ed47136a67343dbd6809c5c91391322267f4476892" name = "github.com/Microsoft/go-winio" @@ -152,6 +160,16 @@ revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" version = "v1.0.2" +[[projects]] + digest = "1:2d54ea234b5487eb9c651f55d8ccf30978b1c2116e2f547ba7cf8cb0932bacc7" + name = "github.com/microsoft/ApplicationInsights-Go" + packages = [ + "appinsights", + "appinsights/contracts", + ] + pruneopts = "UT" + revision = "d813d7725313000ad1b71627b8951323635f0572" + [[projects]] digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" name = "github.com/modern-go/concurrent" @@ -168,6 +186,14 @@ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" +[[projects]] + digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925" + name = "github.com/satori/go.uuid" + packages = ["."] + pruneopts = "UT" + revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" + version = "v1.2.0" + [[projects]] digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426" name = "github.com/sirupsen/logrus" @@ -572,6 +598,7 @@ "github.com/docker/libnetwork/driverapi", "github.com/docker/libnetwork/drivers/remote/api", "github.com/google/uuid", + "github.com/microsoft/ApplicationInsights-Go/appinsights", "golang.org/x/sys/unix", "k8s.io/api/core/v1", "k8s.io/api/networking/v1", diff --git a/Gopkg.toml b/Gopkg.toml index 90a789d15c..eeef3e55da 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -61,6 +61,10 @@ name = 
"github.com/containernetworking/cni" revision = "fbb95fff8a5239a4295c991efa8a397d43118f7e" +[[constraint]] + name = "github.com/microsoft/ApplicationInsights-Go" + revision = "d813d7725313000ad1b71627b8951323635f0572" + [prune] go-tests = true unused-packages = true diff --git a/Makefile b/Makefile index fd95ea0d26..2dce8cc90b 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,7 @@ COREFILES = \ $(wildcard netlink/*.go) \ $(wildcard network/*.go) \ $(wildcard telemetry/*.go) \ + $(wildcard aitelemetry/*.go) \ $(wildcard network/epcommon/*.go) \ $(wildcard network/policy/*.go) \ $(wildcard platform/*.go) \ @@ -40,6 +41,7 @@ CNSFILES = \ $(wildcard cns/dockerclient/*.go) \ $(wildcard cns/imdsclient/*.go) \ $(wildcard cns/ipamclient/*.go) \ + $(wildcard cns/hnsclient/*.go) \ $(wildcard cns/restserver/*.go) \ $(wildcard cns/routes/*.go) \ $(wildcard cns/service/*.go) \ @@ -313,6 +315,7 @@ test-all: ./netlink/ \ ./store/ \ ./telemetry/ \ + ./aitelemetry/ \ ./cnm/network/ \ ./cni/ipam/ \ ./cns/ipamclient/ \ diff --git a/aitelemetry/api.go b/aitelemetry/api.go new file mode 100644 index 0000000000..49bf864b4d --- /dev/null +++ b/aitelemetry/api.go @@ -0,0 +1,47 @@ +package aitelemetry + +import ( + "sync" + + "github.com/Azure/azure-container-networking/common" + "github.com/microsoft/ApplicationInsights-Go/appinsights" +) + +// Application trace/log structure +type Report struct { + Message string + Context string + CustomDimensions map[string]string +} + +// Application metrics structure +type Metric struct { + Name string + Value float64 + CustomDimensions map[string]string +} + +// TelmetryHandle holds appinsight handles and metadata +type telemetryHandle struct { + telemetryConfig *appinsights.TelemetryConfiguration + appName string + appVersion string + metadata common.Metadata + diagListener appinsights.DiagnosticsMessageListener + client appinsights.TelemetryClient + enableMetadataRefreshThread bool + refreshTimeout int + rwmutex sync.RWMutex +} + +// Telemetry 
Interface to send metrics/Logs to appinsights +type TelemetryHandle interface { + // TrackLog function sends report (trace) to appinsights resource. It overrides few of the existing columns with app information + // and for rest it uses custom dimesion + TrackLog(report Report) + // TrackMetric function sends metric to appinsights resource. It overrides few of the existing columns with app information + // and for rest it uses custom dimesion + TrackMetric(metric Metric) + // Close - should be called for each NewAITelemetry call. Will release resources acquired + Close(timeout int) +} diff --git a/aitelemetry/metadata_test.json b/aitelemetry/metadata_test.json new file mode 100644 index 0000000000..d06818bca5 --- /dev/null +++ b/aitelemetry/metadata_test.json @@ -0,0 +1 @@ +{"location":"eastus","name":"k8s-agentpool1-42685608-0","offer":"aks","osType":"Linux","placementGroupId":"","platformFaultDomain":"0","platformUpdateDomain":"0","publisher":"microsoft-aks","resourceGroupName":"rgcnideftesttamil","sku":"aks-ubuntu-1604-201902","subscriptionId":"ea821859-912a-4d20-a4dd-e18a3ce5ba2c","tags":"aksEngineVersion:canary;creationSource:aksengine-k8s-agentpool1-42685608-0;orchestrator:Kubernetes:1.10.13;poolName:agentpool1;resourceNameSuffix:42685608","version":"2019.02.12","vmId":"6baf785b-397c-4967-9f75-cdb3d0df66c4","vmSize":"Standard_DS2_v2","KernelVersion":""} diff --git a/aitelemetry/telemetrywrapper.go b/aitelemetry/telemetrywrapper.go new file mode 100644 index 0000000000..e8702f2432 --- /dev/null +++ b/aitelemetry/telemetrywrapper.go @@ -0,0 +1,176 @@ +package aitelemetry + +import ( + "runtime" + "time" + + "github.com/Azure/azure-container-networking/common" + "github.com/Azure/azure-container-networking/log" + "github.com/Azure/azure-container-networking/store" + "github.com/microsoft/ApplicationInsights-Go/appinsights" +) + +const ( + resourceGroupStr = "ResourceGroup" + vmSizeStr = "VMSize" + osVersionStr = "OSVersion" + locationStr = "Region" + 
appVersionStr = "Appversion" + subscriptionIDStr = "SubscriptionID" + defaultTimeout = 10 +) + +func messageListener() appinsights.DiagnosticsMessageListener { + return appinsights.NewDiagnosticsMessageListener(func(msg string) error { + log.Printf("[AppInsights] [%s] %s\n", time.Now().Format(time.UnixDate), msg) + return nil + }) +} + +func getMetadata(th *telemetryHandle) { + var metadata common.Metadata + var err error + + // check if metadata in memory otherwise initiate wireserver request + for { + metadata, err = common.GetHostMetadata(metadataFile) + if err == nil || !th.enableMetadataRefreshThread { + break + } + + log.Printf("[AppInsights] Error getting metadata %v. Sleep for %d", err, th.refreshTimeout) + time.Sleep(time.Duration(th.refreshTimeout) * time.Second) + } + + //acquire write lock before writing metadata to telemetry handle + th.rwmutex.Lock() + th.metadata = metadata + th.rwmutex.Unlock() + + // Save metadata retrieved from wireserver to a file + kvs, err := store.NewJsonFileStore(metadataFile) + if err != nil { + log.Printf("[AppInsights] Error initializing kvs store: %v", err) + return + } + + kvs.Lock(true) + err = common.SaveHostMetadata(th.metadata, metadataFile) + kvs.Unlock(true) + if err != nil { + log.Printf("[AppInsights] saving host metadata failed with :%v", err) + } +} + +// NewAITelemetry creates telemetry handle with user specified appinsights key. 
+func NewAITelemetry( + key string, + appName string, + appVersion string, + batchSize int, + batchInterval int, + enableMetadataRefreshThread bool, + refreshTimeout int, +) TelemetryHandle { + + telemetryConfig := appinsights.NewTelemetryConfiguration(key) + telemetryConfig.MaxBatchSize = batchSize + telemetryConfig.MaxBatchInterval = time.Duration(batchInterval) * time.Second + + th := &telemetryHandle{ + client: appinsights.NewTelemetryClientFromConfig(telemetryConfig), + appName: appName, + appVersion: appVersion, + diagListener: messageListener(), + enableMetadataRefreshThread: enableMetadataRefreshThread, + refreshTimeout: refreshTimeout, + } + + if th.enableMetadataRefreshThread { + go getMetadata(th) + } else { + getMetadata(th) + } + + return th +} + +// TrackLog function sends report (trace) to appinsights resource. It overrides few of the existing columns with app information +// and for rest it uses custom dimesion +func (th *telemetryHandle) TrackLog(report Report) { + // Initialize new trace message + trace := appinsights.NewTraceTelemetry(report.Message, appinsights.Warning) + + //Override few of existing columns with metadata + trace.Tags.User().SetAuthUserId(runtime.GOOS) + trace.Tags.Operation().SetId(report.Context) + trace.Tags.Operation().SetParentId(th.appName) + + // copy app specified custom dimension + for key, value := range report.CustomDimensions { + trace.Properties[key] = value + } + + trace.Properties[appVersionStr] = th.appVersion + + // Acquire read lock to read metadata + th.rwmutex.RLock() + metadata := th.metadata + th.rwmutex.RUnlock() + + // Check if metadata is populated + if metadata.SubscriptionID != "" { + // copy metadata from wireserver to trace + trace.Tags.User().SetAccountId(th.metadata.SubscriptionID) + trace.Tags.User().SetId(th.metadata.VMName) + trace.Properties[locationStr] = th.metadata.Location + trace.Properties[resourceGroupStr] = th.metadata.ResourceGroupName + trace.Properties[vmSizeStr] = th.metadata.VMSize 
+ trace.Properties[osVersionStr] = th.metadata.OSVersion + } + + // send to appinsights resource + th.client.Track(trace) +} + +// TrackMetric function sends metric to appinsights resource. It overrides few of the existing columns with app information +// and for rest it uses custom dimesion +func (th *telemetryHandle) TrackMetric(metric Metric) { + // Initialize new metric + aimetric := appinsights.NewMetricTelemetry(metric.Name, metric.Value) + + // Acquire read lock to read metadata + th.rwmutex.RLock() + metadata := th.metadata + th.rwmutex.RUnlock() + + // Check if metadata is populated + if metadata.SubscriptionID != "" { + aimetric.Properties[locationStr] = th.metadata.Location + aimetric.Properties[subscriptionIDStr] = th.metadata.SubscriptionID + } + + // copy custom dimensions + for key, value := range metric.CustomDimensions { + aimetric.Properties[key] = value + } + + // send metric to appinsights + th.client.Track(aimetric) +} + +// Close - should be called for each NewAITelemetry call. 
Will release resources acquired +func (th *telemetryHandle) Close(timeout int) { + if timeout <= 0 { + timeout = defaultTimeout + } + + // wait for items to be sent otherwise timeout + <-th.client.Channel().Close(time.Duration(timeout) * time.Second) + + // Remove diganostic message listener + if th.diagListener != nil { + th.diagListener.Remove() + th.diagListener = nil + } +} diff --git a/aitelemetry/telemetrywrapper_linux.go b/aitelemetry/telemetrywrapper_linux.go new file mode 100644 index 0000000000..17bf74fc7c --- /dev/null +++ b/aitelemetry/telemetrywrapper_linux.go @@ -0,0 +1,5 @@ +package aitelemetry + +const ( + metadataFile = "/tmp/azuremetadata.json" +) diff --git a/aitelemetry/telemetrywrapper_test.go b/aitelemetry/telemetrywrapper_test.go new file mode 100644 index 0000000000..55d65bd037 --- /dev/null +++ b/aitelemetry/telemetrywrapper_test.go @@ -0,0 +1,63 @@ +package aitelemetry + +import ( + "os" + "runtime" + "testing" + + "github.com/Azure/azure-container-networking/platform" +) + +var th TelemetryHandle + +func TestMain(m *testing.M) { + + if runtime.GOOS == "linux" { + platform.ExecuteCommand("cp metadata_test.json /tmp/azuremetadata.json") + } else { + platform.ExecuteCommand("copy metadata_test.json azuremetadata.json") + } + + exitCode := m.Run() + + if runtime.GOOS == "linux" { + platform.ExecuteCommand("rm /tmp/azuremetadata.json") + } else { + platform.ExecuteCommand("del azuremetadata.json") + } + + os.Exit(exitCode) +} + +func TestNewAITelemetry(t *testing.T) { + th = NewAITelemetry("00ca2a73-c8d6-4929-a0c2-cf84545ec225", "testapp", "v1.0.26", 4096, 2, false, 10) + if th == nil { + t.Errorf("Error intializing AI telemetry") + } +} + +func TestTrackMetric(t *testing.T) { + metric := Metric{ + Name: "test", + Value: 1.0, + CustomDimensions: make(map[string]string), + } + + metric.CustomDimensions["dim1"] = "col1" + th.TrackMetric(metric) +} + +func TestTrackLog(t *testing.T) { + report := Report{ + Message: "test", + Context: "10a", + 
CustomDimensions: make(map[string]string), + } + + report.CustomDimensions["dim1"] = "col1" + th.TrackLog(report) +} + +func TestClose(t *testing.T) { + th.Close(10) +} diff --git a/aitelemetry/telemetrywrapper_windows.go b/aitelemetry/telemetrywrapper_windows.go new file mode 100644 index 0000000000..f8a337c4c2 --- /dev/null +++ b/aitelemetry/telemetrywrapper_windows.go @@ -0,0 +1,10 @@ +package aitelemetry + +import ( + "os" + "path/filepath" +) + +var ( + metadataFile = filepath.FromSlash(os.Getenv("TEMP")) + "\\azuremetadata.json" +) diff --git a/cni/network/multitenancy.go b/cni/network/multitenancy.go index a7b6ebc707..16728e252f 100644 --- a/cni/network/multitenancy.go +++ b/cni/network/multitenancy.go @@ -64,9 +64,9 @@ func getContainerNetworkConfigurationInternal( namespace string, podName string, ifName string) (*cniTypesCurr.Result, *cns.GetNetworkContainerResponse, net.IPNet, error) { - cnsClient, err := cnsclient.NewCnsClient(address) + cnsClient, err := cnsclient.GetCnsClient() if err != nil { - log.Printf("Initializing CNS client error %v", err) + log.Printf("Failed to get CNS client. Error: %v", err) return nil, nil, net.IPNet{}, err } diff --git a/cni/network/network.go b/cni/network/network.go index 83ef22c12d..ed345af664 100644 --- a/cni/network/network.go +++ b/cni/network/network.go @@ -242,6 +242,11 @@ func (plugin *netPlugin) Add(args *cniSkel.CmdArgs) error { return err } + if nwCfg.MultiTenancy { + // Initialize CNSClient + cnsclient.InitCnsClient(nwCfg.CNSUrl) + } + k8sContainerID := args.ContainerID if len(k8sContainerID) == 0 { errMsg := "Container ID not specified in CNI Args" @@ -552,6 +557,11 @@ func (plugin *netPlugin) Get(args *cniSkel.CmdArgs) error { return err } + if nwCfg.MultiTenancy { + // Initialize CNSClient + cnsclient.InitCnsClient(nwCfg.CNSUrl) + } + // Initialize values from network config. 
if networkId, err = getNetworkName(k8sPodName, k8sNamespace, args.IfName, nwCfg); err != nil { log.Printf("[cni-net] Failed to extract network name from network config. error: %v", err) @@ -627,6 +637,11 @@ func (plugin *netPlugin) Delete(args *cniSkel.CmdArgs) error { log.Printf("[cni-net] Failed to get POD info due to error: %v", err) } + if nwCfg.MultiTenancy { + // Initialize CNSClient + cnsclient.InitCnsClient(nwCfg.CNSUrl) + } + // Initialize values from network config. if networkId, err = getNetworkName(k8sPodName, k8sNamespace, args.IfName, nwCfg); err != nil { log.Printf("[cni-net] Failed to extract network name from network config. error: %v", err) @@ -772,7 +787,7 @@ func (plugin *netPlugin) Update(args *cniSkel.CmdArgs) error { // now query CNS to get the target routes that should be there in the networknamespace (as a result of update) log.Printf("Going to collect target routes for [name=%v, namespace=%v] from CNS.", k8sPodName, k8sNamespace) - if cnsClient, err = cnsclient.NewCnsClient(nwCfg.CNSUrl); err != nil { + if cnsClient, err = cnsclient.InitCnsClient(nwCfg.CNSUrl); err != nil { log.Printf("Initializing CNS client error in CNI Update%v", err) log.Printf(err.Error()) return plugin.Errorf(err.Error()) diff --git a/cni/network/network_linux.go b/cni/network/network_linux.go index cd771afe2a..85042ae4d8 100644 --- a/cni/network/network_linux.go +++ b/cni/network/network_linux.go @@ -58,6 +58,7 @@ func setEndpointOptions(cnsNwConfig *cns.GetNetworkContainerResponse, epInfo *ne epInfo.Data[network.SnatBridgeIPKey] = cnsNwConfig.LocalIPConfiguration.GatewayIPAddress + "/" + strconv.Itoa(int(cnsNwConfig.LocalIPConfiguration.IPSubnet.PrefixLength)) epInfo.AllowInboundFromHostToNC = cnsNwConfig.AllowHostToNCCommunication epInfo.AllowInboundFromNCToHost = cnsNwConfig.AllowNCToHostCommunication + epInfo.NetworkContainerID = cnsNwConfig.NetworkContainerID } epInfo.Data[network.OptVethName] = vethName diff --git a/cni/network/network_windows.go 
b/cni/network/network_windows.go index 9df1fa3558..036390dd11 100644 --- a/cni/network/network_windows.go +++ b/cni/network/network_windows.go @@ -93,6 +93,9 @@ func setEndpointOptions(cnsNwConfig *cns.GetNetworkContainerResponse, epInfo *ne cnetAddressMap = append(cnetAddressMap, ipSubnet.IPAddress+"/"+strconv.Itoa(int(ipSubnet.PrefixLength))) } epInfo.Data[network.CnetAddressSpace] = cnetAddressMap + epInfo.AllowInboundFromHostToNC = cnsNwConfig.AllowHostToNCCommunication + epInfo.AllowInboundFromNCToHost = cnsNwConfig.AllowNCToHostCommunication + epInfo.NetworkContainerID = cnsNwConfig.NetworkContainerID } } diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index 1816e5187c..55d82e7118 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -131,6 +131,7 @@ type GetNetworkContainerRequest struct { // GetNetworkContainerResponse describes the response to retrieve a specifc network container. type GetNetworkContainerResponse struct { + NetworkContainerID string IPConfiguration IPConfiguration Routes []Route CnetAddressSpace []IPSubnet diff --git a/cns/api.go b/cns/api.go index 42ed300fa3..e933d42b56 100644 --- a/cns/api.go +++ b/cns/api.go @@ -7,20 +7,22 @@ import "encoding/json" // Container Network Service remote API Contract const ( - SetEnvironmentPath = "/network/environment" - CreateNetworkPath = "/network/create" - DeleteNetworkPath = "/network/delete" - CreateHnsNetworkPath = "/network/hns/create" - DeleteHnsNetworkPath = "/network/hns/delete" - ReserveIPAddressPath = "/network/ip/reserve" - ReleaseIPAddressPath = "/network/ip/release" - GetHostLocalIPPath = "/network/ip/hostlocal" - GetIPAddressUtilizationPath = "/network/ip/utilization" - GetUnhealthyIPAddressesPath = "/network/ipaddresses/unhealthy" - GetHealthReportPath = "/network/health" - NumberOfCPUCoresPath = "/hostcpucores" - V1Prefix = "/v0.1" - V2Prefix = "/v0.2" + SetEnvironmentPath = "/network/environment" + CreateNetworkPath = 
"/network/create" + DeleteNetworkPath = "/network/delete" + CreateHnsNetworkPath = "/network/hns/create" + DeleteHnsNetworkPath = "/network/hns/delete" + ReserveIPAddressPath = "/network/ip/reserve" + ReleaseIPAddressPath = "/network/ip/release" + GetHostLocalIPPath = "/network/ip/hostlocal" + GetIPAddressUtilizationPath = "/network/ip/utilization" + GetUnhealthyIPAddressesPath = "/network/ipaddresses/unhealthy" + GetHealthReportPath = "/network/health" + NumberOfCPUCoresPath = "/hostcpucores" + CreateHostNCApipaEndpointPath = "/network/createhostncapipaendpoint" + DeleteHostNCApipaEndpointPath = "/network/deletehostncapipaendpoint" + V1Prefix = "/v0.1" + V2Prefix = "/v0.2" ) // SetEnvironmentRequest describes the Request to set the environment in CNS. @@ -153,3 +155,27 @@ type OptionMap map[string]interface{} type errorResponse struct { Err string } + +// CreateHostNCApipaEndpointRequest describes request for create apipa endpoint +// for host container connectivity for the given network container +type CreateHostNCApipaEndpointRequest struct { + NetworkContainerID string +} + +// CreateHostNCApipaEndpointResponse describes response for create apipa endpoint request +// for host container connectivity. +type CreateHostNCApipaEndpointResponse struct { + Response Response + EndpointID string +} + +// DeleteHostNCApipaEndpointRequest describes request for deleting apipa endpoint created +// for host NC connectivity. +type DeleteHostNCApipaEndpointRequest struct { + NetworkContainerID string +} + +// DeleteHostNCApipaEndpointResponse describes response for delete host NC apipa endpoint request. +type DeleteHostNCApipaEndpointResponse struct { + Response Response +} diff --git a/cns/cnsclient/cnsclient.go b/cns/cnsclient/cnsclient.go index aef2749c6d..613b226fd9 100644 --- a/cns/cnsclient/cnsclient.go +++ b/cns/cnsclient/cnsclient.go @@ -19,15 +19,34 @@ const ( defaultCnsURL = "http://localhost:10090" ) -// NewCnsClient create a new cns client. 
-func NewCnsClient(url string) (*CNSClient, error) { - if url == "" { - url = defaultCnsURL +var ( + cnsClient *CNSClient +) + +// InitCnsClient initializes new cns client and returns the object +func InitCnsClient(url string) (*CNSClient, error) { + if cnsClient == nil { + if url == "" { + url = defaultCnsURL + } + + cnsClient = &CNSClient{ + connectionURL: url, + } } - return &CNSClient{ - connectionURL: url, - }, nil + return cnsClient, nil +} + +// GetCnsClient returns the cns client object +func GetCnsClient() (*CNSClient, error) { + var err error + + if cnsClient == nil { + err = fmt.Errorf("[Azure CNSClient] CNS Client not initialized") + } + + return cnsClient, err } // GetNetworkConfiguration Request to get network config. @@ -77,3 +96,105 @@ func (cnsClient *CNSClient) GetNetworkConfiguration(orchestratorContext []byte) return &resp, nil } + +// CreateHostNCApipaEndpoint creates an endpoint in APIPA network for host container connectivity. +func (cnsClient *CNSClient) CreateHostNCApipaEndpoint( + networkContainerID string) (string, error) { + var ( + err error + body bytes.Buffer + ) + + httpc := &http.Client{} + url := cnsClient.connectionURL + cns.CreateHostNCApipaEndpointPath + log.Printf("CreateHostNCApipaEndpoint url: %v for NC: %s", url, networkContainerID) + + payload := &cns.CreateHostNCApipaEndpointRequest{ + NetworkContainerID: networkContainerID, + } + + if err = json.NewEncoder(&body).Encode(payload); err != nil { + log.Errorf("encoding json failed with %v", err) + return "", err + } + + res, err := httpc.Post(url, "application/json", &body) + if err != nil { + log.Errorf("[Azure CNSClient] HTTP Post returned error %v", err.Error()) + return "", err + } + + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + errMsg := fmt.Sprintf("[Azure CNSClient] CreateHostNCApipaEndpoint: Invalid http status code: %v", + res.StatusCode) + log.Errorf(errMsg) + return "", fmt.Errorf(errMsg) + } + + var resp cns.CreateHostNCApipaEndpointResponse 
+ + if err = json.NewDecoder(res.Body).Decode(&resp); err != nil { + log.Errorf("[Azure CNSClient] Error parsing CreateHostNCApipaEndpoint response resp: %v err: %v", + res.Body, err.Error()) + return "", err + } + + if resp.Response.ReturnCode != 0 { + log.Errorf("[Azure CNSClient] CreateHostNCApipaEndpoint received error response :%v", resp.Response.Message) + return "", fmt.Errorf(resp.Response.Message) + } + + return resp.EndpointID, nil +} + +// DeleteHostNCApipaEndpoint deletes the endpoint in APIPA network created for host container connectivity. +func (cnsClient *CNSClient) DeleteHostNCApipaEndpoint(networkContainerID string) error { + var body bytes.Buffer + + httpc := &http.Client{} + url := cnsClient.connectionURL + cns.DeleteHostNCApipaEndpointPath + log.Printf("DeleteHostNCApipaEndpoint url: %v for NC: %s", url, networkContainerID) + + payload := &cns.DeleteHostNCApipaEndpointRequest{ + NetworkContainerID: networkContainerID, + } + + err := json.NewEncoder(&body).Encode(payload) + if err != nil { + log.Errorf("encoding json failed with %v", err) + return err + } + + res, err := httpc.Post(url, "application/json", &body) + if err != nil { + log.Errorf("[Azure CNSClient] HTTP Post returned error %v", err.Error()) + return err + } + + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + errMsg := fmt.Sprintf("[Azure CNSClient] DeleteHostNCApipaEndpoint: Invalid http status code: %v", + res.StatusCode) + log.Errorf(errMsg) + return fmt.Errorf(errMsg) + } + + var resp cns.DeleteHostNCApipaEndpointResponse + + err = json.NewDecoder(res.Body).Decode(&resp) + if err != nil { + log.Errorf("[Azure CNSClient] Error parsing DeleteHostNCApipaEndpoint response resp: %v err: %v", + res.Body, err.Error()) + return err + } + + if resp.Response.ReturnCode != 0 { + log.Errorf("[Azure CNSClient] DeleteHostNCApipaEndpoint received error response :%v", resp.Response.Message) + return fmt.Errorf(resp.Response.Message) + } + + return nil +} diff --git 
a/cns/hnsclient/hnsclient_linux.go b/cns/hnsclient/hnsclient_linux.go index 186dec2eb3..a78acb9fb8 100644 --- a/cns/hnsclient/hnsclient_linux.go +++ b/cns/hnsclient/hnsclient_linux.go @@ -30,3 +30,22 @@ func CreateHnsNetwork(nwConfig cns.CreateHnsNetworkRequest) error { func DeleteHnsNetwork(networkName string) error { return fmt.Errorf("DeleteHnsNetwork shouldn't be called for linux platform") } + +// CreateHostNCApipaEndpoint creates the endpoint in the apipa network +// for host container connectivity +// This is windows platform specific. +func CreateHostNCApipaEndpoint( + networkContainerID string, + localIPConfiguration cns.IPConfiguration, + allowNCToHostCommunication bool, + allowHostToNCCommunication bool) (string, error) { + return "", nil +} + +// DeleteHostNCApipaEndpoint deletes the endpoint in the apipa network +// created for host container connectivity +// This is windows platform specific. +func DeleteHostNCApipaEndpoint( + networkContainerID string) error { + return nil +} diff --git a/cns/hnsclient/hnsclient_windows.go b/cns/hnsclient/hnsclient_windows.go index 249807efda..1fe9333fa8 100644 --- a/cns/hnsclient/hnsclient_windows.go +++ b/cns/hnsclient/hnsclient_windows.go @@ -3,11 +3,17 @@ package hnsclient import ( "encoding/json" "fmt" - "log" + "net" + "strconv" "strings" "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/networkcontainers" + "github.com/Azure/azure-container-networking/common" + "github.com/Azure/azure-container-networking/log" + "github.com/Azure/azure-container-networking/network/policy" "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/hcn" ) const ( @@ -23,6 +29,48 @@ const ( // HNS network types hnsL2Bridge = "l2bridge" hnsL2Tunnel = "l2tunnel" + + // hcnSchemaVersionMajor indicates major version number for hcn schema + hcnSchemaVersionMajor = 2 + + // hcnSchemaVersionMinor indicates minor version number for hcn schema + hcnSchemaVersionMinor = 0 + + // 
hcnIpamTypeStatic indicates the static type of ipam + hcnIpamTypeStatic = "Static" + + // hostNCApipaNetworkName indicates the name of the apipa network used for host container connectivity + hostNCApipaNetworkName = "HostNCApipaNetwork" + + // hostNCApipaNetworkType indicates the type of hns network set up for host NC connectivity + hostNCApipaNetworkType = hcn.L2Bridge + + // hostNCApipaEndpointName indicates the prefix for the name of the apipa endpoint used for + // the host container connectivity + hostNCApipaEndpointNamePrefix = "HostNCApipaEndpoint" + + // Name of the loopback adapter needed to create Host NC apipa network + hostNCLoopbackAdapterName = "LoopbackAdapterHostNCConnectivity" + + // protocolTCP indicates the TCP protocol identifier in HCN + protocolTCP = "6" + + // protocolUDP indicates the UDP protocol identifier in HCN + protocolUDP = "17" + + // protocolICMPv4 indicates the ICMPv4 protocol identifier in HCN + protocolICMPv4 = "1" + + // aclPriority2000 indicates the ACL priority of 2000 + aclPriority2000 = 2000 + + // aclPriority200 indicates the ACL priority of 200 + aclPriority200 = 200 +) + +var ( + // Named Lock for network and endpoint creation/deletion + namedLock = common.InitNamedLock() ) // CreateHnsNetwork creates the HNS network with the provided configuration @@ -153,3 +201,454 @@ func deleteHnsNetwork(networkName string) error { return err } + +func configureHostNCApipaNetwork(localIPConfiguration cns.IPConfiguration) (*hcn.HostComputeNetwork, error) { + network := &hcn.HostComputeNetwork{ + Name: hostNCApipaNetworkName, + Ipams: []hcn.Ipam{ + hcn.Ipam{ + Type: hcnIpamTypeStatic, + }, + }, + SchemaVersion: hcn.SchemaVersion{ + Major: hcnSchemaVersionMajor, + Minor: hcnSchemaVersionMinor, + }, + Type: hostNCApipaNetworkType, + Flags: hcn.EnableNonPersistent, // Set up the network in non-persistent mode + } + + if netAdapterNamePolicy, err := policy.GetHcnNetAdapterPolicy(hostNCLoopbackAdapterName); err == nil { + network.Policies = 
append(network.Policies, netAdapterNamePolicy) + } else { + return nil, fmt.Errorf("Failed to serialize network adapter policy. Error: %v", err) + } + + // Calculate subnet prefix + // Following code calculates the subnet prefix from localIPConfiguration IP + // e.g. IP: 169.254.128.7 Prefix length: 17 then resulting subnet prefix: 169.254.128.0/17 + // subnetPrefix: ffff8000 + // subnetPrefix.IP: 169.254.128.0 + var ( + subnetPrefix net.IPNet + subnetPrefixStr string + ipAddr net.IP + ) + + ipAddr = net.ParseIP(localIPConfiguration.IPSubnet.IPAddress) + if ipAddr.To4() != nil { + subnetPrefix = net.IPNet{Mask: net.CIDRMask(int(localIPConfiguration.IPSubnet.PrefixLength), 32)} + } else if ipAddr.To16() != nil { + subnetPrefix = net.IPNet{Mask: net.CIDRMask(int(localIPConfiguration.IPSubnet.PrefixLength), 128)} + } else { + return nil, fmt.Errorf("Failed get subnet prefix for localIPConfiguration: %+v", localIPConfiguration) + } + + subnetPrefix.IP = ipAddr.Mask(subnetPrefix.Mask) + subnetPrefixStr = subnetPrefix.IP.String() + "/" + strconv.Itoa(int(localIPConfiguration.IPSubnet.PrefixLength)) + + subnet := hcn.Subnet{ + IpAddressPrefix: subnetPrefixStr, + Routes: []hcn.Route{ + hcn.Route{ + NextHop: localIPConfiguration.GatewayIPAddress, + DestinationPrefix: "0.0.0.0/0", + }, + }, + } + + network.Ipams[0].Subnets = append(network.Ipams[0].Subnets, subnet) + + log.Printf("[Azure CNS] Configured HostNCApipaNetwork: %+v", network) + + return network, nil +} + +func createHostNCApipaNetwork( + localIPConfiguration cns.IPConfiguration) (*hcn.HostComputeNetwork, error) { + var ( + network *hcn.HostComputeNetwork + err error + ) + + namedLock.LockAcquire(hostNCApipaNetworkName) + defer namedLock.LockRelease(hostNCApipaNetworkName) + + // Check if the network exists for Host NC connectivity + if network, err = hcn.GetNetworkByName(hostNCApipaNetworkName); err != nil { + // If error is anything other than networkNotFound, mark this as error + if _, networkNotFound := 
err.(hcn.NetworkNotFoundError); !networkNotFound { + return nil, fmt.Errorf("[Azure CNS] ERROR: createApipaNetwork failed. Error with GetNetworkByName: %v", err) + } + + // Network doesn't exist. Create one. + if network, err = configureHostNCApipaNetwork(localIPConfiguration); err != nil { + return nil, fmt.Errorf("Failed to configure network. Error: %v", err) + } + + // Create loopback adapter needed for this HNS network + if interfaceExists, _ := networkcontainers.InterfaceExists(hostNCLoopbackAdapterName); !interfaceExists { + ipconfig := cns.IPConfiguration{ + IPSubnet: cns.IPSubnet{ + IPAddress: localIPConfiguration.GatewayIPAddress, + PrefixLength: localIPConfiguration.IPSubnet.PrefixLength, + }, + GatewayIPAddress: localIPConfiguration.GatewayIPAddress, + } + + if err = networkcontainers.CreateLoopbackAdapter( + hostNCLoopbackAdapterName, + ipconfig, + false, /* Flag to setWeakHostOnInterface */ + "" /* Empty primary Interface Identifier as setWeakHostOnInterface is not needed*/); err != nil { + return nil, fmt.Errorf("Failed to create loopback adapter. Error: %v", err) + } + } + + // Create the HNS network. 
+ log.Printf("[Azure CNS] Creating HostNCApipaNetwork: %+v", network) + + if network, err = network.Create(); err != nil { + return nil, err + } + + log.Printf("[Azure CNS] Successfully created apipa network for host container connectivity: %+v", network) + } else { + log.Printf("[Azure CNS] Found existing HostNCApipaNetwork: %+v", network) + } + + return network, err +} + +func addAclToEndpointPolicy( + aclPolicySetting hcn.AclPolicySetting, + endpointPolicies *[]hcn.EndpointPolicy) error { + var ( + rawJSON []byte + err error + ) + + if rawJSON, err = json.Marshal(aclPolicySetting); err != nil { + return fmt.Errorf("Failed to marshal endpoint ACL: %+v", aclPolicySetting) + } + + endpointPolicy := hcn.EndpointPolicy{ + Type: hcn.ACL, + Settings: rawJSON, + } + + *endpointPolicies = append(*endpointPolicies, endpointPolicy) + + return nil +} + +func configureAclSettingHostNCApipaEndpoint( + protocolList []string, + networkContainerApipaIP string, + hostApipaIP string, + allowNCToHostCommunication bool, + allowHostToNCCommunication bool) ([]hcn.EndpointPolicy, error) { + var ( + err error + endpointPolicies []hcn.EndpointPolicy + ) + + if allowNCToHostCommunication { + log.Printf("[Azure CNS] Allowing NC (%s) to Host (%s) connectivity", networkContainerApipaIP, hostApipaIP) + } + + if allowHostToNCCommunication { + log.Printf("[Azure CNS] Allowing Host (%s) to NC (%s) connectivity", hostApipaIP, networkContainerApipaIP) + } + + // Iterate thru the protocol list and add ACL for each + for _, protocol := range protocolList { + // Endpoint ACL to block all outbound traffic from the Apipa IP of the container + outBlockAll := hcn.AclPolicySetting{ + Protocols: protocol, + Action: hcn.ActionTypeBlock, + Direction: hcn.DirectionTypeOut, + LocalAddresses: networkContainerApipaIP, + RuleType: hcn.RuleTypeSwitch, + Priority: aclPriority2000, + } + + if err = addAclToEndpointPolicy(outBlockAll, &endpointPolicies); err != nil { + return nil, err + } + + if 
allowNCToHostCommunication { + // Endpoint ACL to allow the outbound traffic from the Apipa IP of the container to + // Apipa IP of the host only + outAllowToHostOnly := hcn.AclPolicySetting{ + Protocols: protocol, + Action: hcn.ActionTypeAllow, + Direction: hcn.DirectionTypeOut, + LocalAddresses: networkContainerApipaIP, + RemoteAddresses: hostApipaIP, + RuleType: hcn.RuleTypeSwitch, + Priority: aclPriority200, + } + + if err = addAclToEndpointPolicy(outAllowToHostOnly, &endpointPolicies); err != nil { + return nil, err + } + } + + // Endpoint ACL to block all inbound traffic to the Apipa IP of the container + inBlockAll := hcn.AclPolicySetting{ + Protocols: protocol, + Action: hcn.ActionTypeBlock, + Direction: hcn.DirectionTypeIn, + LocalAddresses: networkContainerApipaIP, + RuleType: hcn.RuleTypeSwitch, + Priority: aclPriority2000, + } + + if err = addAclToEndpointPolicy(inBlockAll, &endpointPolicies); err != nil { + return nil, err + } + + if allowHostToNCCommunication { + // Endpoint ACL to allow the inbound traffic from the apipa IP of the host to + // the apipa IP of the container only + inAllowFromHostOnly := hcn.AclPolicySetting{ + Protocols: protocol, + Action: hcn.ActionTypeAllow, + Direction: hcn.DirectionTypeIn, + LocalAddresses: networkContainerApipaIP, + RemoteAddresses: hostApipaIP, + RuleType: hcn.RuleTypeSwitch, + Priority: aclPriority200, + } + + if err = addAclToEndpointPolicy(inAllowFromHostOnly, &endpointPolicies); err != nil { + return nil, err + } + } + } + + return endpointPolicies, nil +} + +func configureHostNCApipaEndpoint( + endpointName string, + networkID string, + localIPConfiguration cns.IPConfiguration, + allowNCToHostCommunication bool, + allowHostToNCCommunication bool) (*hcn.HostComputeEndpoint, error) { + endpoint := &hcn.HostComputeEndpoint{ + Name: endpointName, + HostComputeNetwork: networkID, + SchemaVersion: hcn.SchemaVersion{ + Major: hcnSchemaVersionMajor, + Minor: hcnSchemaVersionMinor, + }, + } + + 
networkContainerApipaIP := localIPConfiguration.IPSubnet.IPAddress + hostApipaIP := localIPConfiguration.GatewayIPAddress + protocolList := []string{protocolICMPv4, protocolTCP, protocolUDP} + + endpointPolicies, err := configureAclSettingHostNCApipaEndpoint( + protocolList, + networkContainerApipaIP, + hostApipaIP, + allowNCToHostCommunication, + allowHostToNCCommunication) + + if err != nil { + log.Errorf("[Azure CNS] Failed to configure ACL for HostNCApipaEndpoint. Error: %v", err) + return nil, err + } + + for _, endpointPolicy := range endpointPolicies { + endpoint.Policies = append(endpoint.Policies, endpointPolicy) + } + + hcnRoute := hcn.Route{ + NextHop: hostApipaIP, + DestinationPrefix: "0.0.0.0/0", + } + + endpoint.Routes = append(endpoint.Routes, hcnRoute) + + ipConfiguration := hcn.IpConfig{ + IpAddress: networkContainerApipaIP, + PrefixLength: localIPConfiguration.IPSubnet.PrefixLength, + } + + endpoint.IpConfigurations = append(endpoint.IpConfigurations, ipConfiguration) + + log.Printf("[Azure CNS] Configured HostNCApipaEndpoint: %+v", endpoint) + + return endpoint, nil +} + +// CreateHostNCApipaEndpoint creates the endpoint in the apipa network for host container connectivity +func CreateHostNCApipaEndpoint( + networkContainerID string, + localIPConfiguration cns.IPConfiguration, + allowNCToHostCommunication bool, + allowHostToNCCommunication bool) (string, error) { + var ( + network *hcn.HostComputeNetwork + endpoint *hcn.HostComputeEndpoint + endpointName = getHostNCApipaEndpointName(networkContainerID) + err error + ) + + namedLock.LockAcquire(endpointName) + defer namedLock.LockRelease(endpointName) + + // Return if the endpoint already exists + if endpoint, err = hcn.GetEndpointByName(endpointName); err != nil { + // If error is anything other than EndpointNotFoundError, return error. 
+ if _, endpointNotFound := err.(hcn.EndpointNotFoundError); !endpointNotFound { + return "", fmt.Errorf("ERROR: Failed to query endpoint using GetEndpointByName "+ + "due to error: %v", err) + } + } + + if endpoint != nil { + log.Debugf("[Azure CNS] Found existing endpoint: %+v", endpoint) + return endpoint.Id, nil + } + + if network, err = createHostNCApipaNetwork(localIPConfiguration); err != nil { + log.Errorf("[Azure CNS] Failed to create HostNCApipaNetwork. Error: %v", err) + return "", err + } + + log.Printf("[Azure CNS] Configuring HostNCApipaEndpoint: %s, in network: %s with localIPConfig: %+v", + endpointName, network.Id, localIPConfiguration) + + if endpoint, err = configureHostNCApipaEndpoint( + endpointName, + network.Id, + localIPConfiguration, + allowNCToHostCommunication, + allowHostToNCCommunication); err != nil { + log.Errorf("[Azure CNS] Failed to configure HostNCApipaEndpoint: %s. Error: %v", endpointName, err) + return "", err + } + + log.Printf("[Azure CNS] Creating HostNCApipaEndpoint for host container connectivity: %+v", endpoint) + if endpoint, err = endpoint.Create(); err != nil { + err = fmt.Errorf("Failed to create HostNCApipaEndpoint: %s. Error: %v", endpointName, err) + log.Errorf("[Azure CNS] %s", err.Error()) + return "", err + } + + log.Printf("[Azure CNS] Successfully created HostNCApipaEndpoint: %+v", endpoint) + + return endpoint.Id, nil +} + +func getHostNCApipaEndpointName( + networkContainerID string) string { + return hostNCApipaEndpointNamePrefix + "-" + networkContainerID +} + +func deleteNetworkByIDHnsV2( + networkID string) error { + var ( + network *hcn.HostComputeNetwork + err error + ) + + if network, err = hcn.GetNetworkByID(networkID); err != nil { + // If error is anything other than NetworkNotFoundError, return error. + // else log the error but don't return error because network is already deleted. 
+ if _, networkNotFound := err.(hcn.NetworkNotFoundError); !networkNotFound { + return fmt.Errorf("[Azure CNS] deleteNetworkByIDHnsV2 failed due to "+ + "error with GetNetworkByID: %v", err) + } + + log.Errorf("[Azure CNS] Delete called on the Network: %s which doesn't exist. Error: %v", + networkID, err) + + return nil + } + + if err = network.Delete(); err != nil { + return fmt.Errorf("Failed to delete network: %+v. Error: %v", network, err) + } + + log.Printf("[Azure CNS] Successfully deleted network: %+v", network) + + return nil + } + + func deleteEndpointByNameHnsV2( + endpointName string) error { + var ( + endpoint *hcn.HostComputeEndpoint + err error + ) + + // Check if the endpoint exists + if endpoint, err = hcn.GetEndpointByName(endpointName); err != nil { + // If error is anything other than EndpointNotFoundError, return error. + // else log the error but don't return error because endpoint is already deleted. + if _, endpointNotFound := err.(hcn.EndpointNotFoundError); !endpointNotFound { + return fmt.Errorf("[Azure CNS] deleteEndpointByNameHnsV2 failed due to "+ + "error with GetEndpointByName: %v", err) + } + + log.Errorf("[Azure CNS] Delete called on the Endpoint: %s which doesn't exist. Error: %v", + endpointName, err) + + return nil + } + + if err = endpoint.Delete(); err != nil { + return fmt.Errorf("Failed to delete endpoint: %+v. 
Error: %v", endpoint, err) + } + + log.Printf("[Azure CNS] Successfully deleted endpoint: %+v", endpoint) + + return nil + } + + // DeleteHostNCApipaEndpoint deletes the endpoint in the apipa network created for host container connectivity + func DeleteHostNCApipaEndpoint( + networkContainerID string) error { + endpointName := getHostNCApipaEndpointName(networkContainerID) + + namedLock.LockAcquire(endpointName) + defer namedLock.LockRelease(endpointName) + + log.Debugf("[Azure CNS] Deleting HostNCApipaEndpoint: %s", endpointName) + + if err := deleteEndpointByNameHnsV2(endpointName); err != nil { + log.Errorf("[Azure CNS] Failed to delete HostNCApipaEndpoint: %s. Error: %v", endpointName, err) + return err + } + + log.Debugf("[Azure CNS] Successfully deleted HostNCApipaEndpoint: %s", endpointName) + + namedLock.LockAcquire(hostNCApipaNetworkName) + defer namedLock.LockRelease(hostNCApipaNetworkName) + + // Check if hostNCApipaNetworkName has any endpoints left + if network, err := hcn.GetNetworkByName(hostNCApipaNetworkName); err == nil { + var endpoints []hcn.HostComputeEndpoint + if endpoints, err = hcn.ListEndpointsOfNetwork(network.Id); err != nil { + log.Errorf("[Azure CNS] Failed to list endpoints in the network: %s. 
Error: %v", + hostNCApipaNetworkName, err) + return nil + } + + // Delete network if it doesn't have any endpoints + if len(endpoints) == 0 { + log.Debugf("[Azure CNS] Deleting network with ID: %s", network.Id) + if err = deleteNetworkByIDHnsV2(network.Id); err == nil { + // Delete the loopback adapter created for this network + networkcontainers.DeleteLoopbackAdapter(hostNCLoopbackAdapterName) + } + } + } + + return nil +} diff --git a/cns/networkcontainers/networkcontainers.go b/cns/networkcontainers/networkcontainers.go index d1218f8a71..87ea0b8efc 100644 --- a/cns/networkcontainers/networkcontainers.go +++ b/cns/networkcontainers/networkcontainers.go @@ -52,7 +52,7 @@ func NewNetPluginConfiguration(binPath, configPath string) *NetPluginConfigurati } } -func interfaceExists(iFaceName string) (bool, error) { +func InterfaceExists(iFaceName string) (bool, error) { _, err := net.InterfaceByName(iFaceName) if err != nil { errMsg := fmt.Sprintf("[Azure CNS] Unable to get interface by name %s. 
Error: %v", iFaceName, err) @@ -94,6 +94,25 @@ func (cn *NetworkContainers) Delete(networkContainerID string) error { return err } +// CreateLoopbackAdapter creates a loopback adapter with the specified settings +func CreateLoopbackAdapter( + adapterName string, + ipConfig cns.IPConfiguration, + setWeakHostOnInterface bool, + primaryInterfaceIdentifier string) error { + return createOrUpdateWithOperation( + adapterName, + ipConfig, + setWeakHostOnInterface, // Flag to setWeakHostOnInterface + primaryInterfaceIdentifier, + "CREATE") +} + +// DeleteLoopbackAdapter deletes loopback adapter with the specified name +func DeleteLoopbackAdapter(adapterName string) error { + return deleteInterface(adapterName) +} + // This function gets the flattened network configuration (compliant with azure cni) in byte array format func getNetworkConfig(configFilePath string) ([]byte, error) { content, err := ioutil.ReadFile(configFilePath) diff --git a/cns/networkcontainers/networkcontainers_linux.go b/cns/networkcontainers/networkcontainers_linux.go index 0da7ce76a3..e776988c4b 100644 --- a/cns/networkcontainers/networkcontainers_linux.go +++ b/cns/networkcontainers/networkcontainers_linux.go @@ -90,3 +90,12 @@ func deleteInterface(networkContainerID string) error { func configureNetworkContainerNetworking(operation, podName, podNamespace, dockerContainerid string, netPluginConfig *NetPluginConfiguration) (err error) { return fmt.Errorf("[Azure CNS] Operation is not supported in linux.") } + +func createOrUpdateWithOperation( + adapterName string, + ipConfig cns.IPConfiguration, + setWeakHost bool, + primaryInterfaceIdentifier string, + operation string) error { + return nil +} diff --git a/cns/networkcontainers/networkcontainers_windows.go b/cns/networkcontainers/networkcontainers_windows.go index b3b8b912ae..f5439b951b 100644 --- a/cns/networkcontainers/networkcontainers_windows.go +++ b/cns/networkcontainers/networkcontainers_windows.go @@ -27,11 +27,21 @@ func 
createOrUpdateInterface(createNetworkContainerRequest cns.CreateNetworkCont return nil } - if exists, _ := interfaceExists(createNetworkContainerRequest.NetworkContainerid); !exists { - return createOrUpdateWithOperation(createNetworkContainerRequest, "CREATE") + if exists, _ := InterfaceExists(createNetworkContainerRequest.NetworkContainerid); !exists { + return createOrUpdateWithOperation( + createNetworkContainerRequest.NetworkContainerid, + createNetworkContainerRequest.IPConfiguration, + true, // Flag to setWeakHostOnInterface + createNetworkContainerRequest.PrimaryInterfaceIdentifier, + "CREATE") } - return createOrUpdateWithOperation(createNetworkContainerRequest, "UPDATE") + return createOrUpdateWithOperation( + createNetworkContainerRequest.NetworkContainerid, + createNetworkContainerRequest.IPConfiguration, + true, // Flag to setWeakHostOnInterface + createNetworkContainerRequest.PrimaryInterfaceIdentifier, + "UPDATE") } func updateInterface(createNetworkContainerRequest cns.CreateNetworkContainerRequest, netpluginConfig *NetPluginConfiguration) error { @@ -102,28 +112,31 @@ func setWeakHostOnInterface(ipAddress, ncID string) error { return nil } -func createOrUpdateWithOperation(createNetworkContainerRequest cns.CreateNetworkContainerRequest, operation string) error { +func createOrUpdateWithOperation( + adapterName string, + ipConfig cns.IPConfiguration, + setWeakHost bool, + primaryInterfaceIdentifier string, + operation string) error { if _, err := os.Stat("./AzureNetworkContainer.exe"); err != nil { - if os.IsNotExist(err) { - return errors.New("[Azure CNS] Unable to find AzureNetworkContainer.exe. Cannot continue") - } + return fmt.Errorf("[Azure CNS] Unable to find AzureNetworkContainer.exe. 
Cannot continue") } - if createNetworkContainerRequest.IPConfiguration.IPSubnet.IPAddress == "" { - return errors.New("[Azure CNS] IPAddress in IPConfiguration of createNetworkContainerRequest is nil") + if ipConfig.IPSubnet.IPAddress == "" { + return fmt.Errorf("[Azure CNS] IPAddress in IPConfiguration is nil") } - ipv4AddrCidr := fmt.Sprintf("%v/%d", createNetworkContainerRequest.IPConfiguration.IPSubnet.IPAddress, createNetworkContainerRequest.IPConfiguration.IPSubnet.PrefixLength) + ipv4AddrCidr := fmt.Sprintf("%v/%d", ipConfig.IPSubnet.IPAddress, ipConfig.IPSubnet.PrefixLength) log.Printf("[Azure CNS] Created ipv4Cidr as %v", ipv4AddrCidr) ipv4Addr, _, err := net.ParseCIDR(ipv4AddrCidr) - ipv4NetInt := net.CIDRMask((int)(createNetworkContainerRequest.IPConfiguration.IPSubnet.PrefixLength), 32) + ipv4NetInt := net.CIDRMask((int)(ipConfig.IPSubnet.PrefixLength), 32) log.Printf("[Azure CNS] Created netmask as %v", ipv4NetInt) ipv4NetStr := fmt.Sprintf("%d.%d.%d.%d", ipv4NetInt[0], ipv4NetInt[1], ipv4NetInt[2], ipv4NetInt[3]) log.Printf("[Azure CNS] Created netmask in string format %v", ipv4NetStr) args := []string{"/C", "AzureNetworkContainer.exe", "/logpath", log.GetLogDirectory(), "/name", - createNetworkContainerRequest.NetworkContainerid, + adapterName, "/operation", operation, "/ip", @@ -131,7 +144,7 @@ func createOrUpdateWithOperation(createNetworkContainerRequest cns.CreateNetwork "/netmask", ipv4NetStr, "/gateway", - createNetworkContainerRequest.IPConfiguration.GatewayIPAddress, + ipConfig.GatewayIPAddress, "/weakhostsend", "true", "/weakhostreceive", @@ -142,38 +155,34 @@ func createOrUpdateWithOperation(createNetworkContainerRequest cns.CreateNetwork loopbackOperationLock.Lock() log.Printf("[Azure CNS] Going to create/update network loopback adapter: %v", args) bytes, err := c.Output() - if err == nil { - err = setWeakHostOnInterface(createNetworkContainerRequest.PrimaryInterfaceIdentifier, - createNetworkContainerRequest.NetworkContainerid) + if err 
== nil && setWeakHost { + err = setWeakHostOnInterface(primaryInterfaceIdentifier, adapterName) + } + loopbackOperationLock.Unlock() + + if err == nil { - log.Printf("[Azure CNS] Successfully created network loopback adapter for NC: %s. Output:%v.", - createNetworkContainerRequest.NetworkContainerid, string(bytes)) + log.Printf("[Azure CNS] Successfully created network loopback adapter with name: %s and IP config: %+v. Output:%v.", + adapterName, ipConfig, string(bytes)) } else { - log.Printf("Failed to create/update Network Container: %s. Error: %v. Output: %v", - createNetworkContainerRequest.NetworkContainerid, err.Error(), string(bytes)) + log.Printf("[Azure CNS] Failed to create network loopback adapter with name: %s and IP config: %+v."+ + " Error: %v. Output: %v", adapterName, ipConfig, err, string(bytes)) } return err } -func deleteInterface(networkContainerID string) error { - +func deleteInterface(interfaceName string) error { if _, err := os.Stat("./AzureNetworkContainer.exe"); err != nil { - if os.IsNotExist(err) { - return errors.New("[Azure CNS] Unable to find AzureNetworkContainer.exe. Cannot continue") - } + return fmt.Errorf("[Azure CNS] Unable to find AzureNetworkContainer.exe. Cannot continue") } - if networkContainerID == "" { - return errors.New("[Azure CNS] networkContainerID is nil") + if interfaceName == "" { + return fmt.Errorf("[Azure CNS] Interface name is nil") } args := []string{"/C", "AzureNetworkContainer.exe", "/logpath", log.GetLogDirectory(), "/name", - networkContainerID, + interfaceName, "/operation", "DELETE"} @@ -185,14 +194,14 @@ func deleteInterface(networkContainerID string) error { loopbackOperationLock.Unlock() if err == nil { - log.Printf("[Azure CNS] Successfully deleted network container: %s. Output: %v.", - networkContainerID, string(bytes)) + log.Printf("[Azure CNS] Successfully deleted loopback adapter with name: %s. Output: %v.", + interfaceName, string(bytes)) } else { - log.Printf("Failed to delete Network Container: %s. 
Error:%v. Output:%v", - networkContainerID, err.Error(), string(bytes)) - return err + log.Printf("[Azure CNS] Failed to delete loopback adapter with name: %s. Error:%v. Output:%v", + interfaceName, err.Error(), string(bytes)) } - return nil + + return err } func configureNetworkContainerNetworking(operation, podName, podNamespace, dockerContainerid string, netPluginConfig *NetPluginConfiguration) (err error) { diff --git a/cns/restserver/api.go b/cns/restserver/api.go index 25d60e3ca2..f2a4fcbf2d 100644 --- a/cns/restserver/api.go +++ b/cns/restserver/api.go @@ -23,6 +23,7 @@ const ( DockerContainerNotSpecified = 20 UnsupportedVerb = 21 UnsupportedNetworkContainerType = 22 + InvalidRequest = 23 UnexpectedError = 99 ) diff --git a/cns/restserver/restserver.go b/cns/restserver/restserver.go index 9ee6894a09..f38c625d62 100644 --- a/cns/restserver/restserver.go +++ b/cns/restserver/restserver.go @@ -159,6 +159,8 @@ func (service *HTTPRestService) Start(config *common.ServiceConfig) error { listener.AddHandler(cns.CreateHnsNetworkPath, service.createHnsNetwork) listener.AddHandler(cns.DeleteHnsNetworkPath, service.deleteHnsNetwork) listener.AddHandler(cns.NumberOfCPUCoresPath, service.getNumberOfCPUCores) + listener.AddHandler(cns.CreateHostNCApipaEndpointPath, service.createHostNCApipaEndpoint) + listener.AddHandler(cns.DeleteHostNCApipaEndpointPath, service.deleteHostNCApipaEndpoint) // handlers for v0.2 listener.AddHandler(cns.V2Prefix+cns.SetEnvironmentPath, service.setEnvironment) @@ -180,6 +182,8 @@ func (service *HTTPRestService) Start(config *common.ServiceConfig) error { listener.AddHandler(cns.V2Prefix+cns.CreateHnsNetworkPath, service.createHnsNetwork) listener.AddHandler(cns.V2Prefix+cns.DeleteHnsNetworkPath, service.deleteHnsNetwork) listener.AddHandler(cns.V2Prefix+cns.NumberOfCPUCoresPath, service.getNumberOfCPUCores) + listener.AddHandler(cns.V2Prefix+cns.CreateHostNCApipaEndpointPath, service.createHostNCApipaEndpoint) + 
listener.AddHandler(cns.V2Prefix+cns.DeleteHostNCApipaEndpointPath, service.deleteHostNCApipaEndpoint) log.Printf("[Azure CNS] Listening.") return nil @@ -1032,6 +1036,8 @@ func (service *HTTPRestService) saveNetworkContainerGoalState(req cns.CreateNetw case cns.JobObject: fallthrough case cns.COW: + fallthrough + case cns.WebApps: switch service.state.OrchestratorType { case cns.Kubernetes: fallthrough @@ -1042,6 +1048,8 @@ func (service *HTTPRestService) saveNetworkContainerGoalState(req cns.CreateNetw case cns.DBforPostgreSQL: fallthrough case cns.AzureFirstParty: + fallthrough + case cns.WebApps: var podInfo cns.KubernetesPodInfo err := json.Unmarshal(req.OrchestratorContext, &podInfo) if err != nil { @@ -1095,9 +1103,7 @@ func (service *HTTPRestService) createOrUpdateNetworkContainer(w http.ResponseWr case "POST": if req.NetworkContainerType == cns.WebApps { // try to get the saved nc state if it exists - service.lock.Lock() - existing, ok := service.state.ContainerStatus[req.NetworkContainerid] - service.lock.Unlock() + existing, ok := service.getNetworkContainerDetails(req.NetworkContainerid) // create/update nc only if it doesn't exist or it exists and the requested version is different from the saved version if !ok || (ok && existing.VMVersion != req.Version) { @@ -1110,9 +1116,7 @@ func (service *HTTPRestService) createOrUpdateNetworkContainer(w http.ResponseWr } } else if req.NetworkContainerType == cns.AzureContainerInstance { // try to get the saved nc state if it exists - service.lock.Lock() - existing, ok := service.state.ContainerStatus[req.NetworkContainerid] - service.lock.Unlock() + existing, ok := service.getNetworkContainerDetails(req.NetworkContainerid) // create/update nc only if it doesn't exist or it exists and the requested version is different from the saved version if ok && existing.VMVersion != req.Version { @@ -1212,6 +1216,7 @@ func (service *HTTPRestService) getNetworkContainerResponse(req cns.GetNetworkCo savedReq := 
containerDetails.CreateNetworkContainerRequest getNetworkContainerResponse = cns.GetNetworkContainerResponse{ + NetworkContainerID: savedReq.NetworkContainerid, IPConfiguration: savedReq.IPConfiguration, Routes: savedReq.Routes, CnetAddressSpace: savedReq.CnetAddressSpace, @@ -1273,9 +1278,7 @@ func (service *HTTPRestService) deleteNetworkContainer(w http.ResponseWriter, r var containerStatus containerstatus var ok bool - service.lock.Lock() - containerStatus, ok = service.state.ContainerStatus[req.NetworkContainerid] - service.lock.Unlock() + containerStatus, ok = service.getNetworkContainerDetails(req.NetworkContainerid) if !ok { log.Printf("Not able to retrieve network container details for this container id %v", req.NetworkContainerid) @@ -1536,9 +1539,8 @@ func (service *HTTPRestService) attachOrDetachHelper(req cns.ConfigureContainerN Message: "[Azure CNS] Error. NetworkContainerid is empty"} } - service.lock.Lock() - existing, ok := service.state.ContainerStatus[cns.SwiftPrefix+req.NetworkContainerid] - service.lock.Unlock() + existing, ok := service.getNetworkContainerDetails(cns.SwiftPrefix + req.NetworkContainerid) + if !ok { return cns.Response{ ReturnCode: NotFound, @@ -1615,3 +1617,109 @@ func (service *HTTPRestService) getNumberOfCPUCores(w http.ResponseWriter, r *ht log.Response(service.Name, numOfCPUCoresResp, resp.ReturnCode, ReturnCodeToString(resp.ReturnCode), err) } + +func (service *HTTPRestService) getNetworkContainerDetails(networkContainerID string) (containerstatus, bool) { + service.lock.Lock() + defer service.lock.Unlock() + + containerDetails, containerExists := service.state.ContainerStatus[networkContainerID] + + return containerDetails, containerExists +} + +func (service *HTTPRestService) createHostNCApipaEndpoint(w http.ResponseWriter, r *http.Request) { + log.Printf("[Azure-CNS] createHostNCApipaEndpoint") + + var ( + err error + req cns.CreateHostNCApipaEndpointRequest + returnCode int + returnMessage string + endpointID string + 
) + + err = service.Listener.Decode(w, r, &req) + log.Request(service.Name, &req, err) + if err != nil { + return + } + + switch r.Method { + case "POST": + networkContainerDetails, found := service.getNetworkContainerDetails(req.NetworkContainerID) + if found { + if !networkContainerDetails.CreateNetworkContainerRequest.AllowNCToHostCommunication && + !networkContainerDetails.CreateNetworkContainerRequest.AllowHostToNCCommunication { + returnMessage = fmt.Sprintf("HostNCApipaEndpoint creation is not supported unless " + + "AllowNCToHostCommunication or AllowHostToNCCommunication is set to true") + returnCode = InvalidRequest + } else { + if endpointID, err = hnsclient.CreateHostNCApipaEndpoint( + req.NetworkContainerID, + networkContainerDetails.CreateNetworkContainerRequest.LocalIPConfiguration, + networkContainerDetails.CreateNetworkContainerRequest.AllowNCToHostCommunication, + networkContainerDetails.CreateNetworkContainerRequest.AllowHostToNCCommunication); err != nil { + returnMessage = fmt.Sprintf("CreateHostNCApipaEndpoint failed with error: %v", err) + returnCode = UnexpectedError + } + } + } else { + returnMessage = fmt.Sprintf("CreateHostNCApipaEndpoint failed with error: Unable to find goal state for"+ + " the given Network Container: %s", req.NetworkContainerID) + returnCode = UnknownContainerID + } + default: + returnMessage = "createHostNCApipaEndpoint API expects a POST" + returnCode = UnsupportedVerb + } + + response := cns.CreateHostNCApipaEndpointResponse{ + Response: cns.Response{ + ReturnCode: returnCode, + Message: returnMessage, + }, + EndpointID: endpointID, + } + + err = service.Listener.Encode(w, &response) + log.Response(service.Name, response, response.Response.ReturnCode, ReturnCodeToString(response.Response.ReturnCode), err) +} + +func (service *HTTPRestService) deleteHostNCApipaEndpoint(w http.ResponseWriter, r *http.Request) { + log.Printf("[Azure-CNS] deleteHostNCApipaEndpoint") + + var ( + err error + req 
cns.DeleteHostNCApipaEndpointRequest + returnCode int + returnMessage string + ) + + err = service.Listener.Decode(w, r, &req) + log.Request(service.Name, &req, err) + if err != nil { + return + } + + switch r.Method { + case "POST": + if err = hnsclient.DeleteHostNCApipaEndpoint(req.NetworkContainerID); err != nil { + returnMessage = fmt.Sprintf("Failed to delete endpoint for Network Container: %s "+ + "due to error: %v", req.NetworkContainerID, err) + returnCode = UnexpectedError + } + default: + returnMessage = "deleteHostNCApipaEndpoint API expects a POST" + returnCode = UnsupportedVerb + } + + response := cns.DeleteHostNCApipaEndpointResponse{ + Response: cns.Response{ + ReturnCode: returnCode, + Message: returnMessage, + }, + } + + err = service.Listener.Encode(w, &response) + log.Response(service.Name, response, response.Response.ReturnCode, ReturnCodeToString(response.Response.ReturnCode), err) +} diff --git a/common/namedlock.go b/common/namedlock.go new file mode 100644 index 0000000000..68f0a8b625 --- /dev/null +++ b/common/namedlock.go @@ -0,0 +1,79 @@ +package common + +import ( + "sync" + + "github.com/Azure/azure-container-networking/log" +) + +// NamedLock holds a mutex and a map of locks. Mutex is used to +// get exclusive lock on the map while initializing the lock in the +// map. 
+type NamedLock struct { + mutex sync.Mutex + lockMap map[string]*refCountedLock +} + +// refCountedLock holds the lock and ref count for it +type refCountedLock struct { + mutex sync.RWMutex + refCount int +} + +// InitNamedLock initializes the named lock struct +func InitNamedLock() *NamedLock { + return &NamedLock{ + mutex: sync.Mutex{}, + lockMap: make(map[string]*refCountedLock), + } +} + +// LockAcquire acquires the lock with specified name +func (namedLock *NamedLock) LockAcquire(lockName string) { + namedLock.mutex.Lock() + lock, ok := namedLock.lockMap[lockName] + if !ok { + lock = &refCountedLock{refCount: 0} + namedLock.lockMap[lockName] = lock + } + lock.AddRef() + namedLock.mutex.Unlock() + lock.Lock() +} + +// LockRelease releases the lock with specified name +func (namedLock *NamedLock) LockRelease(lockName string) { + namedLock.mutex.Lock() + defer namedLock.mutex.Unlock() + + lock, ok := namedLock.lockMap[lockName] + if ok { + lock.Unlock() + lock.RemoveRef() + if lock.refCount == 0 { + delete(namedLock.lockMap, lockName) + } + } else { + log.Printf("[Azure CNS] Attempt to unlock: %s without acquiring the lock", lockName) + } +} + +// AddRef increments the ref count on the lock +func (refCountedLock *refCountedLock) AddRef() { + refCountedLock.refCount++ +} + +// RemoveRef decrements the ref count on the lock +func (refCountedLock *refCountedLock) RemoveRef() { + refCountedLock.refCount-- +} + +// Lock locks the named lock +func (refCountedLock *refCountedLock) Lock() { + refCountedLock.mutex.Lock() +} + +// Unlock unlocks the named lock +func (refCountedLock *refCountedLock) Unlock() { + refCountedLock.mutex.Unlock() +} diff --git a/common/utils.go b/common/utils.go index 0e35406b51..7c5b7b7b6b 100644 --- a/common/utils.go +++ b/common/utils.go @@ -6,15 +6,25 @@ package common import ( "bufio" "encoding/binary" + "encoding/json" "encoding/xml" "fmt" "io" + "io/ioutil" "net" + "net/http" "os" + "time" 
const (
	// metadataURL is the Azure Instance Metadata Service (IMDS) endpoint.
	metadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-08-01&format=json"
	// Timeouts (seconds) for the metadata HTTP request phases.
	httpConnectionTimeout = 10
	headerTimeout         = 20
)

// Metadata retrieved from wireserver
type Metadata struct {
	Location             string `json:"location"`
	VMName               string `json:"name"`
	Offer                string `json:"offer"`
	OsType               string `json:"osType"`
	PlacementGroupID     string `json:"placementGroupId"`
	PlatformFaultDomain  string `json:"platformFaultDomain"`
	PlatformUpdateDomain string `json:"platformUpdateDomain"`
	Publisher            string `json:"publisher"`
	ResourceGroupName    string `json:"resourceGroupName"`
	Sku                  string `json:"sku"`
	SubscriptionID       string `json:"subscriptionId"`
	Tags                 string `json:"tags"`
	OSVersion            string `json:"version"`
	VMID                 string `json:"vmId"`
	VMSize               string `json:"vmSize"`
	KernelVersion        string
}

// metadataWrapper mirrors the IMDS response shape, which nests the compute
// metadata under a "compute" key.
type metadataWrapper struct {
	Metadata Metadata `json:"compute"`
}

// GetHostMetadata - retrieve VM metadata. It prefers a locally cached JSON
// file (written by SaveHostMetadata) and falls back to querying the instance
// metadata service when the cache is missing or unreadable.
func GetHostMetadata(fileName string) (Metadata, error) {
	// Fast path: a previous run cached the metadata on disk. Any read or
	// unmarshal failure deliberately falls through to the network query.
	content, err := ioutil.ReadFile(fileName)
	if err == nil {
		var metadata Metadata
		if err = json.Unmarshal(content, &metadata); err == nil {
			return metadata, nil
		}
	}

	log.Printf("[Telemetry] Request metadata from wireserver")

	req, err := http.NewRequest("GET", metadataURL, nil)
	if err != nil {
		return Metadata{}, err
	}

	// IMDS requires this header on every request.
	req.Header.Set("Metadata", "True")

	client := &http.Client{
		// FIX: the original client bounded only the dial and response-header
		// phases, so a stalled body read could hang forever. The overall
		// Timeout covers the full request including reading the body.
		Timeout: time.Duration(httpConnectionTimeout+headerTimeout) * time.Second,
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout: time.Duration(httpConnectionTimeout) * time.Second,
			}).DialContext,
			ResponseHeaderTimeout: time.Duration(headerTimeout) * time.Second,
		},
	}

	resp, err := client.Do(req)
	if err != nil {
		return Metadata{}, err
	}

	defer resp.Body.Close()

	metareport := metadataWrapper{}

	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("[Telemetry] Request failed with HTTP error %d", resp.StatusCode)
	} else if err = json.NewDecoder(resp.Body).Decode(&metareport); err != nil {
		// resp.Body is documented to be non-nil whenever Do returns a nil
		// error, so the original nil-body branch was dead code.
		err = fmt.Errorf("[Telemetry] Unable to decode response body due to error: %s", err.Error())
	}

	return metareport.Metadata, err
}

// SaveHostMetadata - save metadata got from wireserver to a JSON file with
// permissions 0644, so later runs can read it instead of hitting the network.
func SaveHostMetadata(metadata Metadata, fileName string) error {
	dataBytes, err := json.Marshal(metadata)
	if err != nil {
		return fmt.Errorf("[Telemetry] marshal data failed with err %+v", err)
	}

	if err = ioutil.WriteFile(fileName, dataBytes, 0644); err != nil {
		log.Printf("[Telemetry] Writing metadata to file failed: %v", err)
	}

	return err
}
index fa1755d061..8464105c55 100644 --- a/network/endpoint.go +++ b/network/endpoint.go @@ -35,6 +35,7 @@ type endpoint struct { EnableMultitenancy bool AllowInboundFromHostToNC bool AllowInboundFromNCToHost bool + NetworkContainerID string NetworkNameSpace string `json:",omitempty"` ContainerID string PODName string `json:",omitempty"` @@ -63,6 +64,7 @@ type EndpointInfo struct { EnableMultiTenancy bool AllowInboundFromHostToNC bool AllowInboundFromNCToHost bool + NetworkContainerID string PODName string PODNameSpace string Data map[string]interface{} @@ -202,11 +204,12 @@ func (ep *endpoint) getInfo() *EndpointInfo { EnableMultiTenancy: ep.EnableMultitenancy, AllowInboundFromHostToNC: ep.AllowInboundFromHostToNC, AllowInboundFromNCToHost: ep.AllowInboundFromNCToHost, - IfName: ep.IfName, - ContainerID: ep.ContainerID, - NetNsPath: ep.NetworkNameSpace, - PODName: ep.PODName, - PODNameSpace: ep.PODNameSpace, + IfName: ep.IfName, + ContainerID: ep.ContainerID, + NetNsPath: ep.NetworkNameSpace, + PODName: ep.PODName, + PODNameSpace: ep.PODNameSpace, + NetworkContainerID: ep.NetworkContainerID, } for _, route := range ep.Routes { diff --git a/network/endpoint_windows.go b/network/endpoint_windows.go index 86cf768264..22a5ccefb6 100644 --- a/network/endpoint_windows.go +++ b/network/endpoint_windows.go @@ -9,6 +9,7 @@ import ( "net" "strings" + "github.com/Azure/azure-container-networking/cns/cnsclient" "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/network/policy" "github.com/Microsoft/hcsshim" @@ -205,6 +206,63 @@ func (nw *network) configureHcnEndpoint(epInfo *EndpointInfo) (*hcn.HostComputeE return hcnEndpoint, nil } +func (nw *network) deleteHostNCApipaEndpoint(networkContainerID string) error { + cnsClient, err := cnsclient.GetCnsClient() + if err != nil { + log.Errorf("Failed to get CNS client. 
Error %v", err) + return err + } + + log.Printf("[net] Deleting HostNCApipaEndpoint for network container: %s", networkContainerID) + err = cnsClient.DeleteHostNCApipaEndpoint(networkContainerID) + log.Printf("[net] Completed HostNCApipaEndpoint deletion for network container: %s"+ + " with error: %v", networkContainerID, err) + + return nil +} + +// createHostNCApipaEndpoint creates a new endpoint in the HostNCApipaNetwork +// for host container connectivity +func (nw *network) createHostNCApipaEndpoint(epInfo *EndpointInfo) error { + var ( + err error + cnsClient *cnsclient.CNSClient + hostNCApipaEndpointID string + namespace *hcn.HostComputeNamespace + ) + + if namespace, err = hcn.GetNamespaceByID(epInfo.NetNsPath); err != nil { + return fmt.Errorf("Failed to retrieve namespace with GetNamespaceByID for NetNsPath: %s"+ + " due to error: %v", epInfo.NetNsPath, err) + } + + if cnsClient, err = cnsclient.GetCnsClient(); err != nil { + log.Errorf("Failed to get CNS client. Error %v", err) + return err + } + + log.Printf("[net] Creating HostNCApipaEndpoint for host container connectivity for NC: %s", + epInfo.NetworkContainerID) + + if hostNCApipaEndpointID, err = + cnsClient.CreateHostNCApipaEndpoint(epInfo.NetworkContainerID); err != nil { + return err + } + + defer func() { + if err != nil { + nw.deleteHostNCApipaEndpoint(epInfo.NetworkContainerID) + } + }() + + if err = hcn.AddNamespaceEndpoint(namespace.Id, hostNCApipaEndpointID); err != nil { + return fmt.Errorf("[net] Failed to add HostNCApipaEndpoint: %s to namespace: %s due to error: %v", + hostNCApipaEndpointID, namespace.Id, err) + } + + return nil +} + // newEndpointImplHnsV2 creates a new endpoint in the network using HnsV2 func (nw *network) newEndpointImplHnsV2(epInfo *EndpointInfo) (*endpoint, error) { hcnEndpoint, err := nw.configureHcnEndpoint(epInfo) @@ -240,6 +298,22 @@ func (nw *network) newEndpointImplHnsV2(epInfo *EndpointInfo) (*endpoint, error) hnsResponse.Id, namespace.Id, err) } + defer 
func() { + if err != nil { + if errRemoveNsEp := hcn.RemoveNamespaceEndpoint(namespace.Id, hnsResponse.Id); errRemoveNsEp != nil { + log.Printf("[net] Failed to remove endpoint: %s from namespace: %s due to error: %v", + hnsResponse.Id, hnsResponse.Id, errRemoveNsEp) + } + } + }() + + // If the Host - container connectivity is requested, create endpoint in HostNCApipaNetwork + if epInfo.AllowInboundFromHostToNC || epInfo.AllowInboundFromNCToHost { + if err = nw.createHostNCApipaEndpoint(epInfo); err != nil { + return nil, fmt.Errorf("Failed to create HostNCApipaEndpoint due to error: %v", err) + } + } + var vlanid int if epInfo.Data != nil { if vlanData, ok := epInfo.Data[VlanIDKey]; ok { @@ -264,6 +338,9 @@ func (nw *network) newEndpointImplHnsV2(epInfo *EndpointInfo) (*endpoint, error) VlanID: vlanid, EnableSnatOnHost: epInfo.EnableSnatOnHost, NetNs: epInfo.NetNsPath, + AllowInboundFromNCToHost: epInfo.AllowInboundFromNCToHost, + AllowInboundFromHostToNC: epInfo.AllowInboundFromHostToNC, + NetworkContainerID: epInfo.NetworkContainerID, } for _, route := range epInfo.Routes { @@ -299,8 +376,17 @@ func (nw *network) deleteEndpointImplHnsV1(ep *endpoint) error { // deleteEndpointImplHnsV2 deletes an existing endpoint from the network using HNS v2. 
func (nw *network) deleteEndpointImplHnsV2(ep *endpoint) error { - var hcnEndpoint *hcn.HostComputeEndpoint - var err error + var ( + hcnEndpoint *hcn.HostComputeEndpoint + err error + ) + + if ep.AllowInboundFromHostToNC || ep.AllowInboundFromNCToHost { + if err = nw.deleteHostNCApipaEndpoint(ep.NetworkContainerID); err != nil { + log.Errorf("[net] Failed to delete HostNCApipaEndpoint due to error: %v", err) + return err + } + } log.Printf("[net] Deleting hcn endpoint with id: %s", ep.HnsId) diff --git a/scripts/install-cni-plugin.sh b/scripts/install-cni-plugin.sh index 1975a7865d..52ed5fb62a 100644 --- a/scripts/install-cni-plugin.sh +++ b/scripts/install-cni-plugin.sh @@ -53,7 +53,7 @@ printf "done.\n" # Install loopback plugin. printf "Installing loopback CNI plugin version $CNI_VERSION to $CNI_BIN_DIR..." -/usr/bin/curl -sSL https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz > $CNI_BIN_DIR/cni.tgz +/usr/bin/curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz > $CNI_BIN_DIR/cni.tgz tar -xzf $CNI_BIN_DIR/cni.tgz -C $CNI_BIN_DIR ./loopback printf "done.\n" diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index 60da5704e0..397017a142 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -75,30 +75,6 @@ type OrchestratorInfo struct { ErrorMessage string } -// Metadata retrieved from wireserver -type Metadata struct { - Location string `json:"location"` - VMName string `json:"name"` - Offer string `json:"offer"` - OsType string `json:"osType"` - PlacementGroupID string `json:"placementGroupId"` - PlatformFaultDomain string `json:"platformFaultDomain"` - PlatformUpdateDomain string `json:"platformUpdateDomain"` - Publisher string `json:"publisher"` - ResourceGroupName string `json:"resourceGroupName"` - Sku string `json:"sku"` - SubscriptionID string `json:"subscriptionId"` - Tags string `json:"tags"` - OSVersion 
string `json:"version"` - VMID string `json:"vmId"` - VMSize string `json:"vmSize"` - KernelVersion string -} - -type metadataWrapper struct { - Metadata Metadata `json:"compute"` -} - // Azure CNI Telemetry Report structure. type CNIReport struct { IsNewInstance bool @@ -121,7 +97,7 @@ type CNIReport struct { SystemDetails SystemInfo InterfaceDetails InterfaceInfo BridgeDetails BridgeInfo - Metadata Metadata `json:"compute"` + Metadata common.Metadata `json:"compute"` } // Azure CNS Telemetry Report structure. @@ -135,7 +111,7 @@ type CNSReport struct { Timestamp string UUID string Errorcode string - Metadata Metadata `json:"compute"` + Metadata common.Metadata `json:"compute"` } // ClusterState contains the current kubernetes cluster state. @@ -158,7 +134,7 @@ type NPMReport struct { UpTime string Timestamp string ClusterState ClusterState - Metadata Metadata `json:"compute"` + Metadata common.Metadata `json:"compute"` } // DNCReport structure. @@ -176,7 +152,7 @@ type DNCReport struct { Orchestrator string ContainerType string Errorcode string - Metadata Metadata `json:"compute"` + Metadata common.Metadata `json:"compute"` } // ReportManager structure. 
diff --git a/telemetry/telemetry_test.go b/telemetry/telemetry_test.go index bdc3ce79e9..4df069ea00 100644 --- a/telemetry/telemetry_test.go +++ b/telemetry/telemetry_test.go @@ -45,7 +45,7 @@ var sampleCniReport = CNIReport{ IsNewInstance: false, EventMessage: "[azure-cns] Code:UnknownContainerID {IPConfiguration:{IPSubnet:{IPAddress: PrefixLength:0} DNSServers:[] GatewayIPAddress:} Routes:[] CnetAddressSpace:[] MultiTenancyInfo:{EncapType: ID:0} PrimaryInterfaceIdentifier: LocalIPConfiguration:{IPSubnet:{IPAddress: PrefixLength:0} DNSServers:[] GatewayIPAddress:} {ReturnCode:18 Message:NetworkContainer doesn't exist.}}.", Timestamp: "2019-02-27 17:44:47.319911225 +0000 UTC", - Metadata: Metadata{ + Metadata: common.Metadata{ Location: "EastUS2EUAP", VMName: "k8s-agentpool1-65609007-0", Offer: "aks", diff --git a/telemetry/telemetrybuffer.go b/telemetry/telemetrybuffer.go index adfca234b7..441f44aec3 100644 --- a/telemetry/telemetrybuffer.go +++ b/telemetry/telemetrybuffer.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-container-networking/common" "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/platform" + "github.com/Azure/azure-container-networking/store" ) // TelemetryConfig - telemetry config read by telemetry service @@ -411,14 +412,21 @@ func (tb *TelemetryBuffer) sendToHost() error { // push - push the report (x) to corresponding slice func (buf *Buffer) push(x interface{}) { - metadata, err := getHostMetadata() + metadata, err := common.GetHostMetadata(metadataFile) if err != nil { log.Logf("Error getting metadata %v", err) } else { - err = saveHostMetadata(metadata) + kvs, err := store.NewJsonFileStore(metadataFile) + if err != nil { + log.Printf("Error acuiring lock for writing metadata file: %v", err) + } + + kvs.Lock(true) + err = common.SaveHostMetadata(metadata, metadataFile) if err != nil { log.Logf("saving host metadata failed with :%v", err) } + kvs.Unlock(true) } switch x.(type) { @@ -466,62 
+474,6 @@ func (buf *Buffer) reset() { payloadSize = 0 } -// saveHostMetadata - save metadata got from wireserver to json file -func saveHostMetadata(metadata Metadata) error { - dataBytes, err := json.Marshal(metadata) - if err != nil { - return fmt.Errorf("[Telemetry] marshal data failed with err %+v", err) - } - - if err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil { - log.Logf("[Telemetry] Writing metadata to file failed: %v", err) - } - - return err -} - -// getHostMetadata - retrieve metadata from host -func getHostMetadata() (Metadata, error) { - content, err := ioutil.ReadFile(metadataFile) - if err == nil { - var metadata Metadata - if err = json.Unmarshal(content, &metadata); err == nil { - return metadata, nil - } - } - - log.Logf("[Telemetry] Request metadata from wireserver") - - req, err := http.NewRequest("GET", metadataURL, nil) - if err != nil { - return Metadata{}, err - } - - req.Header.Set("Metadata", "True") - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return Metadata{}, err - } - - defer resp.Body.Close() - - metareport := metadataWrapper{} - - if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("[Telemetry] Request failed with HTTP error %d", resp.StatusCode) - } else if resp.Body != nil { - err = json.NewDecoder(resp.Body).Decode(&metareport) - if err != nil { - err = fmt.Errorf("[Telemetry] Unable to decode response body due to error: %s", err.Error()) - } - } else { - err = fmt.Errorf("[Telemetry] Response body is empty") - } - - return metareport.Metadata, err -} - // WaitForTelemetrySocket - Block still pipe/sock created or until max attempts retried func WaitForTelemetrySocket(maxAttempt int, waitTimeInMillisecs time.Duration) { for attempt := 0; attempt < maxAttempt; attempt++ { diff --git a/vendor/code.cloudfoundry.org/clock/LICENSE b/vendor/code.cloudfoundry.org/clock/LICENSE new file mode 100644 index 0000000000..f49a4e16e6 --- /dev/null +++ 
b/vendor/code.cloudfoundry.org/clock/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/code.cloudfoundry.org/clock/NOTICE b/vendor/code.cloudfoundry.org/clock/NOTICE new file mode 100644 index 0000000000..29c0e5ff07 --- /dev/null +++ b/vendor/code.cloudfoundry.org/clock/NOTICE @@ -0,0 +1,20 @@ +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +This project contains software that is Copyright (c) 2015 Pivotal Software, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
// Clock abstracts the time package behind an interface so time dependencies
// can be injected and faked in tests.
type Clock interface {
	Now() time.Time
	Sleep(d time.Duration)
	Since(t time.Time) time.Duration
	// After waits for the duration to elapse and then sends the current time
	// on the returned channel.
	// It is equivalent to clock.NewTimer(d).C.
	// The underlying Timer is not recovered by the garbage collector
	// until the timer fires. If efficiency is a concern, use clock.NewTimer
	// instead and call Timer.Stop if the timer is no longer needed.
	After(d time.Duration) <-chan time.Time

	NewTimer(d time.Duration) Timer
	NewTicker(d time.Duration) Ticker
}

// Timer mirrors time.Timer behind an interface so it can be faked.
type Timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// Ticker mirrors time.Ticker behind an interface so it can be faked.
type Ticker interface {
	C() <-chan time.Time
	Stop()
}

// NewClock returns a Clock backed directly by the real time package.
func NewClock() Clock {
	return &realClock{}
}

// realClock implements Clock by delegating every call to package time.
type realClock struct{}

func (rc *realClock) Now() time.Time {
	return time.Now()
}

func (rc *realClock) Since(t time.Time) time.Duration {
	return time.Since(t)
}

func (rc *realClock) Sleep(d time.Duration) {
	timer := rc.NewTimer(d)
	<-timer.C()
}

func (rc *realClock) After(d time.Duration) <-chan time.Time {
	return rc.NewTimer(d).C()
}

func (rc *realClock) NewTimer(d time.Duration) Timer {
	return &realTimer{t: time.NewTimer(d)}
}

func (rc *realClock) NewTicker(d time.Duration) Ticker {
	return &realTicker{t: time.NewTicker(d)}
}

// realTimer adapts *time.Timer to the Timer interface.
type realTimer struct {
	t *time.Timer
}

func (rt *realTimer) C() <-chan time.Time {
	return rt.t.C
}

func (rt *realTimer) Reset(d time.Duration) bool {
	return rt.t.Reset(d)
}

func (rt *realTimer) Stop() bool {
	return rt.t.Stop()
}

// realTicker adapts *time.Ticker to the Ticker interface.
type realTicker struct {
	t *time.Ticker
}

func (rt *realTicker) C() <-chan time.Time {
	return rt.t.C
}

func (rt *realTicker) Stop() {
	rt.t.Stop()
}
+ diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go new file mode 100644 index 0000000000..d532e03a00 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/client.go @@ -0,0 +1,155 @@ +package appinsights + +import ( + "time" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +// Application Insights telemetry client provides interface to track telemetry +// items. +type TelemetryClient interface { + // Gets the telemetry context for this client. Values found on this + // context will get written out to every telemetry item tracked by + // this client. + Context() *TelemetryContext + + // Gets the instrumentation key assigned to this telemetry client. + InstrumentationKey() string + + // Gets the telemetry channel used to submit data to the backend. + Channel() TelemetryChannel + + // Gets whether this client is enabled and will accept telemetry. + IsEnabled() bool + + // Enables or disables the telemetry client. When disabled, telemetry + // is silently swallowed by the client. Defaults to enabled. + SetIsEnabled(enabled bool) + + // Submits the specified telemetry item. + Track(telemetry Telemetry) + + // Log a user action with the specified name + TrackEvent(name string) + + // Log a numeric value that is not specified with a specific event. + // Typically used to send regular reports of performance indicators. + TrackMetric(name string, value float64) + + // Log a trace message with the specified severity level. + TrackTrace(name string, severity contracts.SeverityLevel) + + // Log an HTTP request with the specified method, URL, duration and + // response code. + TrackRequest(method, url string, duration time.Duration, responseCode string) + + // Log a dependency with the specified name, type, target, and + // success status. 
+ TrackRemoteDependency(name, dependencyType, target string, success bool) + + // Log an availability test result with the specified test name, + // duration, and success status. + TrackAvailability(name string, duration time.Duration, success bool) + + // Log an exception with the specified error, which may be a string, + // error or Stringer. The current callstack is collected + // automatically. + TrackException(err interface{}) +} + +type telemetryClient struct { + channel TelemetryChannel + context *TelemetryContext + isEnabled bool +} + +// Creates a new telemetry client instance that submits telemetry with the +// specified instrumentation key. +func NewTelemetryClient(iKey string) TelemetryClient { + return NewTelemetryClientFromConfig(NewTelemetryConfiguration(iKey)) +} + +// Creates a new telemetry client instance configured by the specified +// TelemetryConfiguration object. +func NewTelemetryClientFromConfig(config *TelemetryConfiguration) TelemetryClient { + return &telemetryClient{ + channel: NewInMemoryChannel(config), + context: config.setupContext(), + isEnabled: true, + } +} + +// Gets the telemetry context for this client. Values found on this context +// will get written out to every telemetry item tracked by this client. +func (tc *telemetryClient) Context() *TelemetryContext { + return tc.context +} + +// Gets the telemetry channel used to submit data to the backend. +func (tc *telemetryClient) Channel() TelemetryChannel { + return tc.channel +} + +// Gets the instrumentation key assigned to this telemetry client. +func (tc *telemetryClient) InstrumentationKey() string { + return tc.context.InstrumentationKey() +} + +// Gets whether this client is enabled and will accept telemetry. +func (tc *telemetryClient) IsEnabled() bool { + return tc.isEnabled +} + +// Enables or disables the telemetry client. When disabled, telemetry is +// silently swallowed by the client. Defaults to enabled. 
+func (tc *telemetryClient) SetIsEnabled(isEnabled bool) { + tc.isEnabled = isEnabled +} + +// Submits the specified telemetry item. +func (tc *telemetryClient) Track(item Telemetry) { + if tc.isEnabled && item != nil { + tc.channel.Send(tc.context.envelop(item)) + } +} + +// Log a user action with the specified name +func (tc *telemetryClient) TrackEvent(name string) { + tc.Track(NewEventTelemetry(name)) +} + +// Log a numeric value that is not specified with a specific event. +// Typically used to send regular reports of performance indicators. +func (tc *telemetryClient) TrackMetric(name string, value float64) { + tc.Track(NewMetricTelemetry(name, value)) +} + +// Log a trace message with the specified severity level. +func (tc *telemetryClient) TrackTrace(message string, severity contracts.SeverityLevel) { + tc.Track(NewTraceTelemetry(message, severity)) +} + +// Log an HTTP request with the specified method, URL, duration and response +// code. +func (tc *telemetryClient) TrackRequest(method, url string, duration time.Duration, responseCode string) { + tc.Track(NewRequestTelemetry(method, url, duration, responseCode)) +} + +// Log a dependency with the specified name, type, target, and success +// status. +func (tc *telemetryClient) TrackRemoteDependency(name, dependencyType, target string, success bool) { + tc.Track(NewRemoteDependencyTelemetry(name, dependencyType, target, success)) +} + +// Log an availability test result with the specified test name, duration, +// and success status. +func (tc *telemetryClient) TrackAvailability(name string, duration time.Duration, success bool) { + tc.Track(NewAvailabilityTelemetry(name, duration, success)) +} + +// Log an exception with the specified error, which may be a string, error +// or Stringer. The current callstack is collected automatically. 
+func (tc *telemetryClient) TrackException(err interface{}) { + tc.Track(newExceptionTelemetry(err, 1)) +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go new file mode 100644 index 0000000000..1178b9eaa7 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/clock.go @@ -0,0 +1,11 @@ +package appinsights + +// We need to mock out the clock for tests; we'll use this to do it. + +import "code.cloudfoundry.org/clock" + +var currentClock clock.Clock + +func init() { + currentClock = clock.NewClock() +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go new file mode 100644 index 0000000000..f7fa76b3a8 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/configuration.go @@ -0,0 +1,48 @@ +package appinsights + +import ( + "os" + "runtime" + "time" +) + +// Configuration data used to initialize a new TelemetryClient. +type TelemetryConfiguration struct { + // Instrumentation key for the client. + InstrumentationKey string + + // Endpoint URL where data will be submitted. + EndpointUrl string + + // Maximum number of telemetry items that can be submitted in each + // request. If this many items are buffered, the buffer will be + // flushed before MaxBatchInterval expires. + MaxBatchSize int + + // Maximum time to wait before sending a batch of telemetry. + MaxBatchInterval time.Duration +} + +// Creates a new TelemetryConfiguration object with the specified +// instrumentation key and default values. 
+func NewTelemetryConfiguration(instrumentationKey string) *TelemetryConfiguration { + return &TelemetryConfiguration{ + InstrumentationKey: instrumentationKey, + EndpointUrl: "https://dc.services.visualstudio.com/v2/track", + MaxBatchSize: 1024, + MaxBatchInterval: time.Duration(10) * time.Second, + } +} + +func (config *TelemetryConfiguration) setupContext() *TelemetryContext { + context := NewTelemetryContext(config.InstrumentationKey) + context.Tags.Internal().SetSdkVersion(sdkName + ":" + Version) + context.Tags.Device().SetOsVersion(runtime.GOOS) + + if hostname, err := os.Hostname(); err == nil { + context.Tags.Device().SetId(hostname) + context.Tags.Cloud().SetRoleInstance(hostname) + } + + return context +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/constants.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/constants.go new file mode 100644 index 0000000000..060ed59d4e --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/constants.go @@ -0,0 +1,20 @@ +package appinsights + +// NOTE: This file was automatically generated. + +import "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" + +// Type of the metric data measurement. +const ( + Measurement contracts.DataPointType = contracts.Measurement + Aggregation contracts.DataPointType = contracts.Aggregation +) + +// Defines the level of severity for the event. 
+const ( + Verbose contracts.SeverityLevel = contracts.Verbose + Information contracts.SeverityLevel = contracts.Information + Warning contracts.SeverityLevel = contracts.Warning + Error contracts.SeverityLevel = contracts.Error + Critical contracts.SeverityLevel = contracts.Critical +) diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/availabilitydata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/availabilitydata.go new file mode 100644 index 0000000000..4f0d709f5c --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/availabilitydata.go @@ -0,0 +1,111 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Instances of AvailabilityData represent the result of executing an +// availability test. +type AvailabilityData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Identifier of a test run. Use it to correlate steps of test run and + // telemetry generated by the service. + Id string `json:"id"` + + // Name of the test that these availability results represent. + Name string `json:"name"` + + // Duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 days. + Duration string `json:"duration"` + + // Success flag. + Success bool `json:"success"` + + // Name of the location where the test was run from. + RunLocation string `json:"runLocation"` + + // Diagnostic message for the result. + Message string `json:"message"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` + + // Collection of custom measurements. + Measurements map[string]float64 `json:"measurements,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *AvailabilityData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." 
+ key + ".Availability" + } else { + return "Microsoft.ApplicationInsights.Availability" + } +} + +// Returns the base type when placed within a Data object container. +func (data *AvailabilityData) BaseType() string { + return "AvailabilityData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *AvailabilityData) Sanitize() []string { + var warnings []string + + if len(data.Id) > 64 { + data.Id = data.Id[:64] + warnings = append(warnings, "AvailabilityData.Id exceeded maximum length of 64") + } + + if len(data.Name) > 1024 { + data.Name = data.Name[:1024] + warnings = append(warnings, "AvailabilityData.Name exceeded maximum length of 1024") + } + + if len(data.RunLocation) > 1024 { + data.RunLocation = data.RunLocation[:1024] + warnings = append(warnings, "AvailabilityData.RunLocation exceeded maximum length of 1024") + } + + if len(data.Message) > 8192 { + data.Message = data.Message[:8192] + warnings = append(warnings, "AvailabilityData.Message exceeded maximum length of 8192") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "AvailabilityData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "AvailabilityData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "AvailabilityData.Measurements has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new AvailabilityData instance with default values set by the schema. 
+func NewAvailabilityData() *AvailabilityData { + return &AvailabilityData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/base.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/base.go new file mode 100644 index 0000000000..3ceb5022f2 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/base.go @@ -0,0 +1,25 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Data struct to contain only C section with custom fields. +type Base struct { + + // Name of item (B section) if any. If telemetry data is derived straight from + // this, this should be null. + BaseType string `json:"baseType"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *Base) Sanitize() []string { + var warnings []string + + return warnings +} + +// Creates a new Base instance with default values set by the schema. +func NewBase() *Base { + return &Base{} +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttagkeys.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttagkeys.go new file mode 100644 index 0000000000..eaf57abb31 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttagkeys.go @@ -0,0 +1,153 @@ +package contracts + +// NOTE: This file was automatically generated. + +import "strconv" + +const ( + // Application version. Information in the application context fields is + // always about the application that is sending the telemetry. + ApplicationVersion string = "ai.application.ver" + + // Unique client device id. Computer name in most cases. + DeviceId string = "ai.device.id" + + // Device locale using - pattern, following RFC 5646. + // Example 'en-US'. 
+ DeviceLocale string = "ai.device.locale" + + // Model of the device the end user of the application is using. Used for + // client scenarios. If this field is empty then it is derived from the user + // agent. + DeviceModel string = "ai.device.model" + + // Client device OEM name taken from the browser. + DeviceOEMName string = "ai.device.oemName" + + // Operating system name and version of the device the end user of the + // application is using. If this field is empty then it is derived from the + // user agent. Example 'Windows 10 Pro 10.0.10586.0' + DeviceOSVersion string = "ai.device.osVersion" + + // The type of the device the end user of the application is using. Used + // primarily to distinguish JavaScript telemetry from server side telemetry. + // Examples: 'PC', 'Phone', 'Browser'. 'PC' is the default value. + DeviceType string = "ai.device.type" + + // The IP address of the client device. IPv4 and IPv6 are supported. + // Information in the location context fields is always about the end user. + // When telemetry is sent from a service, the location context is about the + // user that initiated the operation in the service. + LocationIp string = "ai.location.ip" + + // A unique identifier for the operation instance. The operation.id is created + // by either a request or a page view. All other telemetry sets this to the + // value for the containing request or page view. Operation.id is used for + // finding all the telemetry items for a specific operation instance. + OperationId string = "ai.operation.id" + + // The name (group) of the operation. The operation.name is created by either + // a request or a page view. All other telemetry items set this to the value + // for the containing request or page view. Operation.name is used for finding + // all the telemetry items for a group of operations (i.e. 'GET Home/Index'). + OperationName string = "ai.operation.name" + + // The unique identifier of the telemetry item's immediate parent. 
+ OperationParentId string = "ai.operation.parentId" + + // Name of synthetic source. Some telemetry from the application may represent + // a synthetic traffic. It may be web crawler indexing the web site, site + // availability tests or traces from diagnostic libraries like Application + // Insights SDK itself. + OperationSyntheticSource string = "ai.operation.syntheticSource" + + // The correlation vector is a light weight vector clock which can be used to + // identify and order related events across clients and services. + OperationCorrelationVector string = "ai.operation.correlationVector" + + // Session ID - the instance of the user's interaction with the app. + // Information in the session context fields is always about the end user. + // When telemetry is sent from a service, the session context is about the + // user that initiated the operation in the service. + SessionId string = "ai.session.id" + + // Boolean value indicating whether the session identified by ai.session.id is + // first for the user or not. + SessionIsFirst string = "ai.session.isFirst" + + // In multi-tenant applications this is the account ID or name which the user + // is acting with. Examples may be subscription ID for Azure portal or blog + // name blogging platform. + UserAccountId string = "ai.user.accountId" + + // Anonymous user id. Represents the end user of the application. When + // telemetry is sent from a service, the user context is about the user that + // initiated the operation in the service. + UserId string = "ai.user.id" + + // Authenticated user id. The opposite of ai.user.id, this represents the user + // with a friendly name. Since it's PII information it is not collected by + // default by most SDKs. + UserAuthUserId string = "ai.user.authUserId" + + // Name of the role the application is a part of. Maps directly to the role + // name in azure. + CloudRole string = "ai.cloud.role" + + // Name of the instance where the application is running. 
Computer name for + // on-premisis, instance name for Azure. + CloudRoleInstance string = "ai.cloud.roleInstance" + + // SDK version. See + // https://github.com/microsoft/ApplicationInsights-Home/blob/master/SDK-AUTHORING.md#sdk-version-specification + // for information. + InternalSdkVersion string = "ai.internal.sdkVersion" + + // Agent version. Used to indicate the version of StatusMonitor installed on + // the computer if it is used for data collection. + InternalAgentVersion string = "ai.internal.agentVersion" + + // This is the node name used for billing purposes. Use it to override the + // standard detection of nodes. + InternalNodeName string = "ai.internal.nodeName" +) + +var tagMaxLengths = map[string]int{ + "ai.application.ver": 1024, + "ai.device.id": 1024, + "ai.device.locale": 64, + "ai.device.model": 256, + "ai.device.oemName": 256, + "ai.device.osVersion": 256, + "ai.device.type": 64, + "ai.location.ip": 46, + "ai.operation.id": 128, + "ai.operation.name": 1024, + "ai.operation.parentId": 128, + "ai.operation.syntheticSource": 1024, + "ai.operation.correlationVector": 64, + "ai.session.id": 64, + "ai.session.isFirst": 5, + "ai.user.accountId": 1024, + "ai.user.id": 128, + "ai.user.authUserId": 1024, + "ai.cloud.role": 256, + "ai.cloud.roleInstance": 256, + "ai.internal.sdkVersion": 64, + "ai.internal.agentVersion": 64, + "ai.internal.nodeName": 256, +} + +// Truncates tag values that exceed their maximum supported lengths. Returns +// warnings for each affected field. 
+func SanitizeTags(tags map[string]string) []string { + var warnings []string + for k, v := range tags { + if maxlen, ok := tagMaxLengths[k]; ok && len(v) > maxlen { + tags[k] = v[:maxlen] + warnings = append(warnings, "Value for "+k+" exceeded maximum length of "+strconv.Itoa(maxlen)) + } + } + + return warnings +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttags.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttags.go new file mode 100644 index 0000000000..426378318b --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/contexttags.go @@ -0,0 +1,565 @@ +package contracts + +// NOTE: This file was automatically generated. + +type ContextTags map[string]string + +// Helper type that provides access to context fields grouped under 'application'. +// This is returned by TelemetryContext.Tags.Application() +type ApplicationContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'device'. +// This is returned by TelemetryContext.Tags.Device() +type DeviceContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'location'. +// This is returned by TelemetryContext.Tags.Location() +type LocationContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'operation'. +// This is returned by TelemetryContext.Tags.Operation() +type OperationContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'session'. +// This is returned by TelemetryContext.Tags.Session() +type SessionContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'user'. +// This is returned by TelemetryContext.Tags.User() +type UserContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'cloud'. 
+// This is returned by TelemetryContext.Tags.Cloud() +type CloudContextTags ContextTags + +// Helper type that provides access to context fields grouped under 'internal'. +// This is returned by TelemetryContext.Tags.Internal() +type InternalContextTags ContextTags + +// Returns a helper to access context fields grouped under 'application'. +func (tags ContextTags) Application() ApplicationContextTags { + return ApplicationContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'device'. +func (tags ContextTags) Device() DeviceContextTags { + return DeviceContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'location'. +func (tags ContextTags) Location() LocationContextTags { + return LocationContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'operation'. +func (tags ContextTags) Operation() OperationContextTags { + return OperationContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'session'. +func (tags ContextTags) Session() SessionContextTags { + return SessionContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'user'. +func (tags ContextTags) User() UserContextTags { + return UserContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'cloud'. +func (tags ContextTags) Cloud() CloudContextTags { + return CloudContextTags(tags) +} + +// Returns a helper to access context fields grouped under 'internal'. +func (tags ContextTags) Internal() InternalContextTags { + return InternalContextTags(tags) +} + +// Application version. Information in the application context fields is +// always about the application that is sending the telemetry. +func (tags ApplicationContextTags) GetVer() string { + if result, ok := tags["ai.application.ver"]; ok { + return result + } + + return "" +} + +// Application version. 
Information in the application context fields is +// always about the application that is sending the telemetry. +func (tags ApplicationContextTags) SetVer(value string) { + if value != "" { + tags["ai.application.ver"] = value + } else { + delete(tags, "ai.application.ver") + } +} + +// Unique client device id. Computer name in most cases. +func (tags DeviceContextTags) GetId() string { + if result, ok := tags["ai.device.id"]; ok { + return result + } + + return "" +} + +// Unique client device id. Computer name in most cases. +func (tags DeviceContextTags) SetId(value string) { + if value != "" { + tags["ai.device.id"] = value + } else { + delete(tags, "ai.device.id") + } +} + +// Device locale using - pattern, following RFC 5646. +// Example 'en-US'. +func (tags DeviceContextTags) GetLocale() string { + if result, ok := tags["ai.device.locale"]; ok { + return result + } + + return "" +} + +// Device locale using - pattern, following RFC 5646. +// Example 'en-US'. +func (tags DeviceContextTags) SetLocale(value string) { + if value != "" { + tags["ai.device.locale"] = value + } else { + delete(tags, "ai.device.locale") + } +} + +// Model of the device the end user of the application is using. Used for +// client scenarios. If this field is empty then it is derived from the user +// agent. +func (tags DeviceContextTags) GetModel() string { + if result, ok := tags["ai.device.model"]; ok { + return result + } + + return "" +} + +// Model of the device the end user of the application is using. Used for +// client scenarios. If this field is empty then it is derived from the user +// agent. +func (tags DeviceContextTags) SetModel(value string) { + if value != "" { + tags["ai.device.model"] = value + } else { + delete(tags, "ai.device.model") + } +} + +// Client device OEM name taken from the browser. 
+func (tags DeviceContextTags) GetOemName() string { + if result, ok := tags["ai.device.oemName"]; ok { + return result + } + + return "" +} + +// Client device OEM name taken from the browser. +func (tags DeviceContextTags) SetOemName(value string) { + if value != "" { + tags["ai.device.oemName"] = value + } else { + delete(tags, "ai.device.oemName") + } +} + +// Operating system name and version of the device the end user of the +// application is using. If this field is empty then it is derived from the +// user agent. Example 'Windows 10 Pro 10.0.10586.0' +func (tags DeviceContextTags) GetOsVersion() string { + if result, ok := tags["ai.device.osVersion"]; ok { + return result + } + + return "" +} + +// Operating system name and version of the device the end user of the +// application is using. If this field is empty then it is derived from the +// user agent. Example 'Windows 10 Pro 10.0.10586.0' +func (tags DeviceContextTags) SetOsVersion(value string) { + if value != "" { + tags["ai.device.osVersion"] = value + } else { + delete(tags, "ai.device.osVersion") + } +} + +// The type of the device the end user of the application is using. Used +// primarily to distinguish JavaScript telemetry from server side telemetry. +// Examples: 'PC', 'Phone', 'Browser'. 'PC' is the default value. +func (tags DeviceContextTags) GetType() string { + if result, ok := tags["ai.device.type"]; ok { + return result + } + + return "" +} + +// The type of the device the end user of the application is using. Used +// primarily to distinguish JavaScript telemetry from server side telemetry. +// Examples: 'PC', 'Phone', 'Browser'. 'PC' is the default value. +func (tags DeviceContextTags) SetType(value string) { + if value != "" { + tags["ai.device.type"] = value + } else { + delete(tags, "ai.device.type") + } +} + +// The IP address of the client device. IPv4 and IPv6 are supported. +// Information in the location context fields is always about the end user. 
+// When telemetry is sent from a service, the location context is about the +// user that initiated the operation in the service. +func (tags LocationContextTags) GetIp() string { + if result, ok := tags["ai.location.ip"]; ok { + return result + } + + return "" +} + +// The IP address of the client device. IPv4 and IPv6 are supported. +// Information in the location context fields is always about the end user. +// When telemetry is sent from a service, the location context is about the +// user that initiated the operation in the service. +func (tags LocationContextTags) SetIp(value string) { + if value != "" { + tags["ai.location.ip"] = value + } else { + delete(tags, "ai.location.ip") + } +} + +// A unique identifier for the operation instance. The operation.id is created +// by either a request or a page view. All other telemetry sets this to the +// value for the containing request or page view. Operation.id is used for +// finding all the telemetry items for a specific operation instance. +func (tags OperationContextTags) GetId() string { + if result, ok := tags["ai.operation.id"]; ok { + return result + } + + return "" +} + +// A unique identifier for the operation instance. The operation.id is created +// by either a request or a page view. All other telemetry sets this to the +// value for the containing request or page view. Operation.id is used for +// finding all the telemetry items for a specific operation instance. +func (tags OperationContextTags) SetId(value string) { + if value != "" { + tags["ai.operation.id"] = value + } else { + delete(tags, "ai.operation.id") + } +} + +// The name (group) of the operation. The operation.name is created by either +// a request or a page view. All other telemetry items set this to the value +// for the containing request or page view. Operation.name is used for finding +// all the telemetry items for a group of operations (i.e. 'GET Home/Index'). 
+func (tags OperationContextTags) GetName() string { + if result, ok := tags["ai.operation.name"]; ok { + return result + } + + return "" +} + +// The name (group) of the operation. The operation.name is created by either +// a request or a page view. All other telemetry items set this to the value +// for the containing request or page view. Operation.name is used for finding +// all the telemetry items for a group of operations (i.e. 'GET Home/Index'). +func (tags OperationContextTags) SetName(value string) { + if value != "" { + tags["ai.operation.name"] = value + } else { + delete(tags, "ai.operation.name") + } +} + +// The unique identifier of the telemetry item's immediate parent. +func (tags OperationContextTags) GetParentId() string { + if result, ok := tags["ai.operation.parentId"]; ok { + return result + } + + return "" +} + +// The unique identifier of the telemetry item's immediate parent. +func (tags OperationContextTags) SetParentId(value string) { + if value != "" { + tags["ai.operation.parentId"] = value + } else { + delete(tags, "ai.operation.parentId") + } +} + +// Name of synthetic source. Some telemetry from the application may represent +// a synthetic traffic. It may be web crawler indexing the web site, site +// availability tests or traces from diagnostic libraries like Application +// Insights SDK itself. +func (tags OperationContextTags) GetSyntheticSource() string { + if result, ok := tags["ai.operation.syntheticSource"]; ok { + return result + } + + return "" +} + +// Name of synthetic source. Some telemetry from the application may represent +// a synthetic traffic. It may be web crawler indexing the web site, site +// availability tests or traces from diagnostic libraries like Application +// Insights SDK itself. 
+func (tags OperationContextTags) SetSyntheticSource(value string) { + if value != "" { + tags["ai.operation.syntheticSource"] = value + } else { + delete(tags, "ai.operation.syntheticSource") + } +} + +// The correlation vector is a light weight vector clock which can be used to +// identify and order related events across clients and services. +func (tags OperationContextTags) GetCorrelationVector() string { + if result, ok := tags["ai.operation.correlationVector"]; ok { + return result + } + + return "" +} + +// The correlation vector is a light weight vector clock which can be used to +// identify and order related events across clients and services. +func (tags OperationContextTags) SetCorrelationVector(value string) { + if value != "" { + tags["ai.operation.correlationVector"] = value + } else { + delete(tags, "ai.operation.correlationVector") + } +} + +// Session ID - the instance of the user's interaction with the app. +// Information in the session context fields is always about the end user. +// When telemetry is sent from a service, the session context is about the +// user that initiated the operation in the service. +func (tags SessionContextTags) GetId() string { + if result, ok := tags["ai.session.id"]; ok { + return result + } + + return "" +} + +// Session ID - the instance of the user's interaction with the app. +// Information in the session context fields is always about the end user. +// When telemetry is sent from a service, the session context is about the +// user that initiated the operation in the service. +func (tags SessionContextTags) SetId(value string) { + if value != "" { + tags["ai.session.id"] = value + } else { + delete(tags, "ai.session.id") + } +} + +// Boolean value indicating whether the session identified by ai.session.id is +// first for the user or not. 
+func (tags SessionContextTags) GetIsFirst() string { + if result, ok := tags["ai.session.isFirst"]; ok { + return result + } + + return "" +} + +// Boolean value indicating whether the session identified by ai.session.id is +// first for the user or not. +func (tags SessionContextTags) SetIsFirst(value string) { + if value != "" { + tags["ai.session.isFirst"] = value + } else { + delete(tags, "ai.session.isFirst") + } +} + +// In multi-tenant applications this is the account ID or name which the user +// is acting with. Examples may be subscription ID for Azure portal or blog +// name blogging platform. +func (tags UserContextTags) GetAccountId() string { + if result, ok := tags["ai.user.accountId"]; ok { + return result + } + + return "" +} + +// In multi-tenant applications this is the account ID or name which the user +// is acting with. Examples may be subscription ID for Azure portal or blog +// name blogging platform. +func (tags UserContextTags) SetAccountId(value string) { + if value != "" { + tags["ai.user.accountId"] = value + } else { + delete(tags, "ai.user.accountId") + } +} + +// Anonymous user id. Represents the end user of the application. When +// telemetry is sent from a service, the user context is about the user that +// initiated the operation in the service. +func (tags UserContextTags) GetId() string { + if result, ok := tags["ai.user.id"]; ok { + return result + } + + return "" +} + +// Anonymous user id. Represents the end user of the application. When +// telemetry is sent from a service, the user context is about the user that +// initiated the operation in the service. +func (tags UserContextTags) SetId(value string) { + if value != "" { + tags["ai.user.id"] = value + } else { + delete(tags, "ai.user.id") + } +} + +// Authenticated user id. The opposite of ai.user.id, this represents the user +// with a friendly name. Since it's PII information it is not collected by +// default by most SDKs. 
+func (tags UserContextTags) GetAuthUserId() string { + if result, ok := tags["ai.user.authUserId"]; ok { + return result + } + + return "" +} + +// Authenticated user id. The opposite of ai.user.id, this represents the user +// with a friendly name. Since it's PII information it is not collected by +// default by most SDKs. +func (tags UserContextTags) SetAuthUserId(value string) { + if value != "" { + tags["ai.user.authUserId"] = value + } else { + delete(tags, "ai.user.authUserId") + } +} + +// Name of the role the application is a part of. Maps directly to the role +// name in azure. +func (tags CloudContextTags) GetRole() string { + if result, ok := tags["ai.cloud.role"]; ok { + return result + } + + return "" +} + +// Name of the role the application is a part of. Maps directly to the role +// name in azure. +func (tags CloudContextTags) SetRole(value string) { + if value != "" { + tags["ai.cloud.role"] = value + } else { + delete(tags, "ai.cloud.role") + } +} + +// Name of the instance where the application is running. Computer name for +// on-premisis, instance name for Azure. +func (tags CloudContextTags) GetRoleInstance() string { + if result, ok := tags["ai.cloud.roleInstance"]; ok { + return result + } + + return "" +} + +// Name of the instance where the application is running. Computer name for +// on-premisis, instance name for Azure. +func (tags CloudContextTags) SetRoleInstance(value string) { + if value != "" { + tags["ai.cloud.roleInstance"] = value + } else { + delete(tags, "ai.cloud.roleInstance") + } +} + +// SDK version. See +// https://github.com/microsoft/ApplicationInsights-Home/blob/master/SDK-AUTHORING.md#sdk-version-specification +// for information. +func (tags InternalContextTags) GetSdkVersion() string { + if result, ok := tags["ai.internal.sdkVersion"]; ok { + return result + } + + return "" +} + +// SDK version. 
See +// https://github.com/microsoft/ApplicationInsights-Home/blob/master/SDK-AUTHORING.md#sdk-version-specification +// for information. +func (tags InternalContextTags) SetSdkVersion(value string) { + if value != "" { + tags["ai.internal.sdkVersion"] = value + } else { + delete(tags, "ai.internal.sdkVersion") + } +} + +// Agent version. Used to indicate the version of StatusMonitor installed on +// the computer if it is used for data collection. +func (tags InternalContextTags) GetAgentVersion() string { + if result, ok := tags["ai.internal.agentVersion"]; ok { + return result + } + + return "" +} + +// Agent version. Used to indicate the version of StatusMonitor installed on +// the computer if it is used for data collection. +func (tags InternalContextTags) SetAgentVersion(value string) { + if value != "" { + tags["ai.internal.agentVersion"] = value + } else { + delete(tags, "ai.internal.agentVersion") + } +} + +// This is the node name used for billing purposes. Use it to override the +// standard detection of nodes. +func (tags InternalContextTags) GetNodeName() string { + if result, ok := tags["ai.internal.nodeName"]; ok { + return result + } + + return "" +} + +// This is the node name used for billing purposes. Use it to override the +// standard detection of nodes. +func (tags InternalContextTags) SetNodeName(value string) { + if value != "" { + tags["ai.internal.nodeName"] = value + } else { + delete(tags, "ai.internal.nodeName") + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/data.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/data.go new file mode 100644 index 0000000000..144b7a8e50 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/data.go @@ -0,0 +1,25 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Data struct to contain both B and C sections. 
+type Data struct { + Base + + // Container for data item (B section). + BaseData interface{} `json:"baseData"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *Data) Sanitize() []string { + var warnings []string + + return warnings +} + +// Creates a new Data instance with default values set by the schema. +func NewData() *Data { + return &Data{} +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapoint.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapoint.go new file mode 100644 index 0000000000..b06beb1e5c --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapoint.go @@ -0,0 +1,54 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Metric data single measurement. +type DataPoint struct { + + // Name of the metric. + Name string `json:"name"` + + // Metric type. Single measurement or the aggregated value. + Kind DataPointType `json:"kind"` + + // Single value for measurement. Sum of individual measurements for the + // aggregation. + Value float64 `json:"value"` + + // Metric weight of the aggregated metric. Should not be set for a + // measurement. + Count int `json:"count"` + + // Minimum value of the aggregated metric. Should not be set for a + // measurement. + Min float64 `json:"min"` + + // Maximum value of the aggregated metric. Should not be set for a + // measurement. + Max float64 `json:"max"` + + // Standard deviation of the aggregated metric. Should not be set for a + // measurement. + StdDev float64 `json:"stdDev"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. 
+func (data *DataPoint) Sanitize() []string { + var warnings []string + + if len(data.Name) > 1024 { + data.Name = data.Name[:1024] + warnings = append(warnings, "DataPoint.Name exceeded maximum length of 1024") + } + + return warnings +} + +// Creates a new DataPoint instance with default values set by the schema. +func NewDataPoint() *DataPoint { + return &DataPoint{ + Kind: Measurement, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapointtype.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapointtype.go new file mode 100644 index 0000000000..8f468e7a3c --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/datapointtype.go @@ -0,0 +1,22 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Type of the metric data measurement. +type DataPointType int + +const ( + Measurement DataPointType = 0 + Aggregation DataPointType = 1 +) + +func (value DataPointType) String() string { + switch int(value) { + case 0: + return "Measurement" + case 1: + return "Aggregation" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/domain.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/domain.go new file mode 100644 index 0000000000..024945baec --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/domain.go @@ -0,0 +1,21 @@ +package contracts + +// NOTE: This file was automatically generated. + +// The abstract common base of all domains. +type Domain struct { +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *Domain) Sanitize() []string { + var warnings []string + + return warnings +} + +// Creates a new Domain instance with default values set by the schema. 
+func NewDomain() *Domain { + return &Domain{} +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/envelope.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/envelope.go new file mode 100644 index 0000000000..91c80a9d5d --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/envelope.go @@ -0,0 +1,82 @@ +package contracts + +// NOTE: This file was automatically generated. + +// System variables for a telemetry item. +type Envelope struct { + + // Envelope version. For internal use only. By assigning this the default, it + // will not be serialized within the payload unless changed to a value other + // than #1. + Ver int `json:"ver"` + + // Type name of telemetry data item. + Name string `json:"name"` + + // Event date time when telemetry item was created. This is the wall clock + // time on the client when the event was generated. There is no guarantee that + // the client's time is accurate. This field must be formatted in UTC ISO 8601 + // format, with a trailing 'Z' character, as described publicly on + // https://en.wikipedia.org/wiki/ISO_8601#UTC. Note: the number of decimal + // seconds digits provided are variable (and unspecified). Consumers should + // handle this, i.e. managed code consumers should not use format 'O' for + // parsing as it specifies a fixed length. Example: + // 2009-06-15T13:45:30.0000000Z. + Time string `json:"time"` + + // Sampling rate used in application. This telemetry item represents 1 / + // sampleRate actual telemetry items. + SampleRate float64 `json:"sampleRate"` + + // Sequence field used to track absolute order of uploaded events. + Seq string `json:"seq"` + + // The application's instrumentation key. The key is typically represented as + // a GUID, but there are cases when it is not a guid. No code should rely on + // iKey being a GUID. Instrumentation key is case insensitive. 
+ IKey string `json:"iKey"` + + // Key/value collection of context properties. See ContextTagKeys for + // information on available properties. + Tags map[string]string `json:"tags,omitempty"` + + // Telemetry data item. + Data interface{} `json:"data"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *Envelope) Sanitize() []string { + var warnings []string + + if len(data.Name) > 1024 { + data.Name = data.Name[:1024] + warnings = append(warnings, "Envelope.Name exceeded maximum length of 1024") + } + + if len(data.Time) > 64 { + data.Time = data.Time[:64] + warnings = append(warnings, "Envelope.Time exceeded maximum length of 64") + } + + if len(data.Seq) > 64 { + data.Seq = data.Seq[:64] + warnings = append(warnings, "Envelope.Seq exceeded maximum length of 64") + } + + if len(data.IKey) > 40 { + data.IKey = data.IKey[:40] + warnings = append(warnings, "Envelope.IKey exceeded maximum length of 40") + } + + return warnings +} + +// Creates a new Envelope instance with default values set by the schema. +func NewEnvelope() *Envelope { + return &Envelope{ + Ver: 1, + SampleRate: 100.0, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/eventdata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/eventdata.go new file mode 100644 index 0000000000..2093c74fd0 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/eventdata.go @@ -0,0 +1,82 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Instances of Event represent structured event records that can be grouped +// and searched by their properties. Event data item also creates a metric of +// event count by name. +type EventData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Event name. 
Keep it low cardinality to allow proper grouping and useful + // metrics. + Name string `json:"name"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` + + // Collection of custom measurements. + Measurements map[string]float64 `json:"measurements,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *EventData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".Event" + } else { + return "Microsoft.ApplicationInsights.Event" + } +} + +// Returns the base type when placed within a Data object container. +func (data *EventData) BaseType() string { + return "EventData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *EventData) Sanitize() []string { + var warnings []string + + if len(data.Name) > 512 { + data.Name = data.Name[:512] + warnings = append(warnings, "EventData.Name exceeded maximum length of 512") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "EventData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "EventData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "EventData.Measurements has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new EventData instance with default values set by the schema. 
+func NewEventData() *EventData { + return &EventData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondata.go new file mode 100644 index 0000000000..fe1c2f2b8e --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondata.go @@ -0,0 +1,93 @@ +package contracts + +// NOTE: This file was automatically generated. + +// An instance of Exception represents a handled or unhandled exception that +// occurred during execution of the monitored application. +type ExceptionData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Exception chain - list of inner exceptions. + Exceptions []*ExceptionDetails `json:"exceptions"` + + // Severity level. Mostly used to indicate exception severity level when it is + // reported by logging library. + SeverityLevel SeverityLevel `json:"severityLevel"` + + // Identifier of where the exception was thrown in code. Used for exceptions + // grouping. Typically a combination of exception type and a function from the + // call stack. + ProblemId string `json:"problemId"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` + + // Collection of custom measurements. + Measurements map[string]float64 `json:"measurements,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *ExceptionData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".Exception" + } else { + return "Microsoft.ApplicationInsights.Exception" + } +} + +// Returns the base type when placed within a Data object container. 
+func (data *ExceptionData) BaseType() string { + return "ExceptionData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *ExceptionData) Sanitize() []string { + var warnings []string + + for _, ptr := range data.Exceptions { + warnings = append(warnings, ptr.Sanitize()...) + } + + if len(data.ProblemId) > 1024 { + data.ProblemId = data.ProblemId[:1024] + warnings = append(warnings, "ExceptionData.ProblemId exceeded maximum length of 1024") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "ExceptionData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "ExceptionData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "ExceptionData.Measurements has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new ExceptionData instance with default values set by the schema. +func NewExceptionData() *ExceptionData { + return &ExceptionData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondetails.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondetails.go new file mode 100644 index 0000000000..8b768ab6cf --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/exceptiondetails.go @@ -0,0 +1,66 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Exception details of the exception in a chain. 
+type ExceptionDetails struct { + + // In case exception is nested (outer exception contains inner one), the id + // and outerId properties are used to represent the nesting. + Id int `json:"id"` + + // The value of outerId is a reference to an element in ExceptionDetails that + // represents the outer exception + OuterId int `json:"outerId"` + + // Exception type name. + TypeName string `json:"typeName"` + + // Exception message. + Message string `json:"message"` + + // Indicates if full exception stack is provided in the exception. The stack + // may be trimmed, such as in the case of a StackOverflow exception. + HasFullStack bool `json:"hasFullStack"` + + // Text describing the stack. Either stack or parsedStack should have a value. + Stack string `json:"stack"` + + // List of stack frames. Either stack or parsedStack should have a value. + ParsedStack []*StackFrame `json:"parsedStack,omitempty"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *ExceptionDetails) Sanitize() []string { + var warnings []string + + if len(data.TypeName) > 1024 { + data.TypeName = data.TypeName[:1024] + warnings = append(warnings, "ExceptionDetails.TypeName exceeded maximum length of 1024") + } + + if len(data.Message) > 32768 { + data.Message = data.Message[:32768] + warnings = append(warnings, "ExceptionDetails.Message exceeded maximum length of 32768") + } + + if len(data.Stack) > 32768 { + data.Stack = data.Stack[:32768] + warnings = append(warnings, "ExceptionDetails.Stack exceeded maximum length of 32768") + } + + for _, ptr := range data.ParsedStack { + warnings = append(warnings, ptr.Sanitize()...) + } + + return warnings +} + +// Creates a new ExceptionDetails instance with default values set by the schema. 
+func NewExceptionDetails() *ExceptionDetails { + return &ExceptionDetails{ + HasFullStack: true, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/messagedata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/messagedata.go new file mode 100644 index 0000000000..c0676431f2 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/messagedata.go @@ -0,0 +1,72 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Instances of Message represent printf-like trace statements that are +// text-searched. Log4Net, NLog and other text-based log file entries are +// translated into intances of this type. The message does not have +// measurements. +type MessageData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Trace message + Message string `json:"message"` + + // Trace severity level. + SeverityLevel SeverityLevel `json:"severityLevel"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *MessageData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".Message" + } else { + return "Microsoft.ApplicationInsights.Message" + } +} + +// Returns the base type when placed within a Data object container. +func (data *MessageData) BaseType() string { + return "MessageData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. 
+func (data *MessageData) Sanitize() []string { + var warnings []string + + if len(data.Message) > 32768 { + data.Message = data.Message[:32768] + warnings = append(warnings, "MessageData.Message exceeded maximum length of 32768") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "MessageData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "MessageData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new MessageData instance with default values set by the schema. +func NewMessageData() *MessageData { + return &MessageData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/metricdata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/metricdata.go new file mode 100644 index 0000000000..106576f2c7 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/metricdata.go @@ -0,0 +1,68 @@ +package contracts + +// NOTE: This file was automatically generated. + +// An instance of the Metric item is a list of measurements (single data +// points) and/or aggregations. +type MetricData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // List of metrics. Only one metric in the list is currently supported by + // Application Insights storage. If multiple data points were sent only the + // first one will be used. + Metrics []*DataPoint `json:"metrics"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. 
+func (data *MetricData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".Metric" + } else { + return "Microsoft.ApplicationInsights.Metric" + } +} + +// Returns the base type when placed within a Data object container. +func (data *MetricData) BaseType() string { + return "MetricData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *MetricData) Sanitize() []string { + var warnings []string + + for _, ptr := range data.Metrics { + warnings = append(warnings, ptr.Sanitize()...) + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "MetricData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "MetricData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new MetricData instance with default values set by the schema. +func NewMetricData() *MetricData { + return &MetricData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/package.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/package.go new file mode 100644 index 0000000000..ac96d6d35e --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/package.go @@ -0,0 +1,4 @@ +// Data contract definitions for telemetry submitted to Application Insights. 
+// This is generated from the schemas found at +// https://github.com/microsoft/ApplicationInsights-Home/tree/master/EndpointSpecs/Schemas/Bond +package contracts diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/pageviewdata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/pageviewdata.go new file mode 100644 index 0000000000..15e1d0aa93 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/pageviewdata.go @@ -0,0 +1,85 @@ +package contracts + +// NOTE: This file was automatically generated. + +// An instance of PageView represents a generic action on a page like a button +// click. It is also the base type for PageView. +type PageViewData struct { + Domain + EventData + + // Request URL with all query string parameters + Url string `json:"url"` + + // Request duration in format: DD.HH:MM:SS.MMMMMM. For a page view + // (PageViewData), this is the duration. For a page view with performance + // information (PageViewPerfData), this is the page load time. Must be less + // than 1000 days. + Duration string `json:"duration"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *PageViewData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".PageView" + } else { + return "Microsoft.ApplicationInsights.PageView" + } +} + +// Returns the base type when placed within a Data object container. +func (data *PageViewData) BaseType() string { + return "PageViewData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. 
+func (data *PageViewData) Sanitize() []string { + var warnings []string + + if len(data.Url) > 2048 { + data.Url = data.Url[:2048] + warnings = append(warnings, "PageViewData.Url exceeded maximum length of 2048") + } + + if len(data.Name) > 512 { + data.Name = data.Name[:512] + warnings = append(warnings, "PageViewData.Name exceeded maximum length of 512") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "PageViewData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "PageViewData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "PageViewData.Measurements has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new PageViewData instance with default values set by the schema. +func NewPageViewData() *PageViewData { + return &PageViewData{ + EventData: EventData{ + Ver: 2, + }, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/remotedependencydata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/remotedependencydata.go new file mode 100644 index 0000000000..f078243f45 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/remotedependencydata.go @@ -0,0 +1,134 @@ +package contracts + +// NOTE: This file was automatically generated. + +// An instance of Remote Dependency represents an interaction of the monitored +// component with a remote component/service like SQL or an HTTP endpoint. 
+type RemoteDependencyData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Name of the command initiated with this dependency call. Low cardinality + // value. Examples are stored procedure name and URL path template. + Name string `json:"name"` + + // Identifier of a dependency call instance. Used for correlation with the + // request telemetry item corresponding to this dependency call. + Id string `json:"id"` + + // Result code of a dependency call. Examples are SQL error code and HTTP + // status code. + ResultCode string `json:"resultCode"` + + // Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 + // days. + Duration string `json:"duration"` + + // Indication of successfull or unsuccessfull call. + Success bool `json:"success"` + + // Command initiated by this dependency call. Examples are SQL statement and + // HTTP URL's with all query parameters. + Data string `json:"data"` + + // Target site of a dependency call. Examples are server name, host address. + Target string `json:"target"` + + // Dependency type name. Very low cardinality value for logical grouping of + // dependencies and interpretation of other fields like commandName and + // resultCode. Examples are SQL, Azure table, and HTTP. + Type string `json:"type"` + + // Collection of custom properties. + Properties map[string]string `json:"properties,omitempty"` + + // Collection of custom measurements. + Measurements map[string]float64 `json:"measurements,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *RemoteDependencyData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".RemoteDependency" + } else { + return "Microsoft.ApplicationInsights.RemoteDependency" + } +} + +// Returns the base type when placed within a Data object container. 
+func (data *RemoteDependencyData) BaseType() string { + return "RemoteDependencyData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *RemoteDependencyData) Sanitize() []string { + var warnings []string + + if len(data.Name) > 1024 { + data.Name = data.Name[:1024] + warnings = append(warnings, "RemoteDependencyData.Name exceeded maximum length of 1024") + } + + if len(data.Id) > 128 { + data.Id = data.Id[:128] + warnings = append(warnings, "RemoteDependencyData.Id exceeded maximum length of 128") + } + + if len(data.ResultCode) > 1024 { + data.ResultCode = data.ResultCode[:1024] + warnings = append(warnings, "RemoteDependencyData.ResultCode exceeded maximum length of 1024") + } + + if len(data.Data) > 8192 { + data.Data = data.Data[:8192] + warnings = append(warnings, "RemoteDependencyData.Data exceeded maximum length of 8192") + } + + if len(data.Target) > 1024 { + data.Target = data.Target[:1024] + warnings = append(warnings, "RemoteDependencyData.Target exceeded maximum length of 1024") + } + + if len(data.Type) > 1024 { + data.Type = data.Type[:1024] + warnings = append(warnings, "RemoteDependencyData.Type exceeded maximum length of 1024") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "RemoteDependencyData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "RemoteDependencyData.Properties has key with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "RemoteDependencyData.Measurements has key with 
length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new RemoteDependencyData instance with default values set by the schema. +func NewRemoteDependencyData() *RemoteDependencyData { + return &RemoteDependencyData{ + Ver: 2, + Success: true, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/requestdata.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/requestdata.go new file mode 100644 index 0000000000..7db3b0aa90 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/requestdata.go @@ -0,0 +1,125 @@ +package contracts + +// NOTE: This file was automatically generated. + +// An instance of Request represents completion of an external request to the +// application to do work and contains a summary of that request execution and +// the results. +type RequestData struct { + Domain + + // Schema version + Ver int `json:"ver"` + + // Identifier of a request call instance. Used for correlation between request + // and other telemetry items. + Id string `json:"id"` + + // Source of the request. Examples are the instrumentation key of the caller + // or the ip address of the caller. + Source string `json:"source"` + + // Name of the request. Represents code path taken to process request. Low + // cardinality value to allow better grouping of requests. For HTTP requests + // it represents the HTTP method and URL path template like 'GET + // /values/{id}'. + Name string `json:"name"` + + // Request duration in format: DD.HH:MM:SS.MMMMMM. Must be less than 1000 + // days. + Duration string `json:"duration"` + + // Result of a request execution. HTTP status code for HTTP requests. + ResponseCode string `json:"responseCode"` + + // Indication of successfull or unsuccessfull call. + Success bool `json:"success"` + + // Request URL with all query string parameters. + Url string `json:"url"` + + // Collection of custom properties. 
+ Properties map[string]string `json:"properties,omitempty"` + + // Collection of custom measurements. + Measurements map[string]float64 `json:"measurements,omitempty"` +} + +// Returns the name used when this is embedded within an Envelope container. +func (data *RequestData) EnvelopeName(key string) string { + if key != "" { + return "Microsoft.ApplicationInsights." + key + ".Request" + } else { + return "Microsoft.ApplicationInsights.Request" + } +} + +// Returns the base type when placed within a Data object container. +func (data *RequestData) BaseType() string { + return "RequestData" +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *RequestData) Sanitize() []string { + var warnings []string + + if len(data.Id) > 128 { + data.Id = data.Id[:128] + warnings = append(warnings, "RequestData.Id exceeded maximum length of 128") + } + + if len(data.Source) > 1024 { + data.Source = data.Source[:1024] + warnings = append(warnings, "RequestData.Source exceeded maximum length of 1024") + } + + if len(data.Name) > 1024 { + data.Name = data.Name[:1024] + warnings = append(warnings, "RequestData.Name exceeded maximum length of 1024") + } + + if len(data.ResponseCode) > 1024 { + data.ResponseCode = data.ResponseCode[:1024] + warnings = append(warnings, "RequestData.ResponseCode exceeded maximum length of 1024") + } + + if len(data.Url) > 2048 { + data.Url = data.Url[:2048] + warnings = append(warnings, "RequestData.Url exceeded maximum length of 2048") + } + + if data.Properties != nil { + for k, v := range data.Properties { + if len(v) > 8192 { + data.Properties[k] = v[:8192] + warnings = append(warnings, "RequestData.Properties has value with length exceeding max of 8192: "+k) + } + if len(k) > 150 { + data.Properties[k[:150]] = data.Properties[k] + delete(data.Properties, k) + warnings = append(warnings, "RequestData.Properties has key 
with length exceeding max of 150: "+k) + } + } + } + + if data.Measurements != nil { + for k, v := range data.Measurements { + if len(k) > 150 { + data.Measurements[k[:150]] = v + delete(data.Measurements, k) + warnings = append(warnings, "RequestData.Measurements has key with length exceeding max of 150: "+k) + } + } + } + + return warnings +} + +// Creates a new RequestData instance with default values set by the schema. +func NewRequestData() *RequestData { + return &RequestData{ + Ver: 2, + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/severitylevel.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/severitylevel.go new file mode 100644 index 0000000000..a2ec9b8f03 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/severitylevel.go @@ -0,0 +1,31 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Defines the level of severity for the event. +type SeverityLevel int + +const ( + Verbose SeverityLevel = 0 + Information SeverityLevel = 1 + Warning SeverityLevel = 2 + Error SeverityLevel = 3 + Critical SeverityLevel = 4 +) + +func (value SeverityLevel) String() string { + switch int(value) { + case 0: + return "Verbose" + case 1: + return "Information" + case 2: + return "Warning" + case 3: + return "Error" + case 4: + return "Critical" + default: + return "" + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/stackframe.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/stackframe.go new file mode 100644 index 0000000000..d012f6b140 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/contracts/stackframe.go @@ -0,0 +1,52 @@ +package contracts + +// NOTE: This file was automatically generated. + +// Stack frame information. +type StackFrame struct { + + // Level in the call stack. 
For the long stacks SDK may not report every + // function in a call stack. + Level int `json:"level"` + + // Method name. + Method string `json:"method"` + + // Name of the assembly (dll, jar, etc.) containing this function. + Assembly string `json:"assembly"` + + // File name or URL of the method implementation. + FileName string `json:"fileName"` + + // Line number of the code implementation. + Line int `json:"line"` +} + +// Truncates string fields that exceed their maximum supported sizes for this +// object and all objects it references. Returns a warning for each affected +// field. +func (data *StackFrame) Sanitize() []string { + var warnings []string + + if len(data.Method) > 1024 { + data.Method = data.Method[:1024] + warnings = append(warnings, "StackFrame.Method exceeded maximum length of 1024") + } + + if len(data.Assembly) > 1024 { + data.Assembly = data.Assembly[:1024] + warnings = append(warnings, "StackFrame.Assembly exceeded maximum length of 1024") + } + + if len(data.FileName) > 1024 { + data.FileName = data.FileName[:1024] + warnings = append(warnings, "StackFrame.FileName exceeded maximum length of 1024") + } + + return warnings +} + +// Creates a new StackFrame instance with default values set by the schema. +func NewStackFrame() *StackFrame { + return &StackFrame{} +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go new file mode 100644 index 0000000000..7ff90bfaee --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/diagnostics.go @@ -0,0 +1,88 @@ +package appinsights + +import ( + "fmt" + "sync" +) + +type diagnosticsMessageWriter struct { + listeners []*diagnosticsMessageListener + lock sync.Mutex +} + +// Handler function for receiving diagnostics messages. If this returns an +// error, then the listener will be removed. 
+type DiagnosticsMessageHandler func(string) error + +// Listener type returned by NewDiagnosticsMessageListener. +type DiagnosticsMessageListener interface { + // Stop receiving diagnostics messages from this listener. + Remove() +} + +type diagnosticsMessageListener struct { + handler DiagnosticsMessageHandler + writer *diagnosticsMessageWriter +} + +func (listener *diagnosticsMessageListener) Remove() { + listener.writer.removeListener(listener) +} + +// The one and only diagnostics writer. +var diagnosticsWriter = &diagnosticsMessageWriter{} + +// Subscribes the specified handler to diagnostics messages from the SDK. The +// returned interface can be used to unsubscribe. +func NewDiagnosticsMessageListener(handler DiagnosticsMessageHandler) DiagnosticsMessageListener { + listener := &diagnosticsMessageListener{ + handler: handler, + writer: diagnosticsWriter, + } + + diagnosticsWriter.appendListener(listener) + return listener +} + +func (writer *diagnosticsMessageWriter) appendListener(listener *diagnosticsMessageListener) { + writer.lock.Lock() + defer writer.lock.Unlock() + writer.listeners = append(writer.listeners, listener) +} + +func (writer *diagnosticsMessageWriter) removeListener(listener *diagnosticsMessageListener) { + writer.lock.Lock() + defer writer.lock.Unlock() + + for i := 0; i < len(writer.listeners); i++ { + if writer.listeners[i] == listener { + writer.listeners[i] = writer.listeners[len(writer.listeners)-1] + writer.listeners = writer.listeners[:len(writer.listeners)-1] + return + } + } +} + +func (writer *diagnosticsMessageWriter) Write(message string) { + var toRemove []*diagnosticsMessageListener + for _, listener := range writer.listeners { + if err := listener.handler(message); err != nil { + toRemove = append(toRemove, listener) + } + } + + for _, listener := range toRemove { + listener.Remove() + } +} + +func (writer *diagnosticsMessageWriter) Printf(message string, args ...interface{}) { + // Don't bother with Sprintf if nobody is 
listening + if writer.hasListeners() { + writer.Write(fmt.Sprintf(message, args...)) + } +} + +func (writer *diagnosticsMessageWriter) hasListeners() bool { + return len(writer.listeners) > 0 +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/exception.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/exception.go new file mode 100644 index 0000000000..c440797dcf --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/exception.go @@ -0,0 +1,150 @@ +package appinsights + +import ( + "fmt" + "reflect" + "runtime" + "strings" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +// Exception telemetry items represent a handled or unhandled exceptions that +// occurred during execution of the monitored application. +type ExceptionTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Panic message: string, error, or Stringer + Error interface{} + + // List of stack frames. Use GetCallstack to generate this data. + Frames []*contracts.StackFrame + + // Severity level. + SeverityLevel contracts.SeverityLevel +} + +// Creates a new exception telemetry item with the specified error and the +// current callstack. This should be used directly from a function that +// handles a recover(), or to report an unexpected error return value from +// a function. 
+func NewExceptionTelemetry(err interface{}) *ExceptionTelemetry { + return newExceptionTelemetry(err, 1) +} + +func newExceptionTelemetry(err interface{}, skip int) *ExceptionTelemetry { + return &ExceptionTelemetry{ + Error: err, + Frames: GetCallstack(2 + skip), + SeverityLevel: Error, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +func (telem *ExceptionTelemetry) TelemetryData() TelemetryData { + details := contracts.NewExceptionDetails() + details.HasFullStack = len(telem.Frames) > 0 + details.ParsedStack = telem.Frames + + if err, ok := telem.Error.(error); ok { + details.Message = err.Error() + details.TypeName = reflect.TypeOf(telem.Error).String() + } else if str, ok := telem.Error.(string); ok { + details.Message = str + details.TypeName = "string" + } else if stringer, ok := telem.Error.(fmt.Stringer); ok { + details.Message = stringer.String() + details.TypeName = reflect.TypeOf(telem.Error).String() + } else if stringer, ok := telem.Error.(fmt.GoStringer); ok { + details.Message = stringer.GoString() + details.TypeName = reflect.TypeOf(telem.Error).String() + } else { + details.Message = "" + details.TypeName = "" + } + + data := contracts.NewExceptionData() + data.SeverityLevel = telem.SeverityLevel + data.Exceptions = []*contracts.ExceptionDetails{details} + data.Properties = telem.Properties + data.Measurements = telem.Measurements + + return data +} + +// Generates a callstack suitable for inclusion in Application Insights +// exception telemetry for the current goroutine, skipping a number of frames +// specified by skip. 
+func GetCallstack(skip int) []*contracts.StackFrame { + var stackFrames []*contracts.StackFrame + + if skip < 0 { + skip = 0 + } + + stack := make([]uintptr, 64+skip) + depth := runtime.Callers(skip+1, stack) + if depth == 0 { + return stackFrames + } + + frames := runtime.CallersFrames(stack[:depth]) + level := 0 + for { + frame, more := frames.Next() + + stackFrame := &contracts.StackFrame{ + Level: level, + FileName: frame.File, + Line: frame.Line, + } + + if frame.Function != "" { + /* Default */ + stackFrame.Method = frame.Function + + /* Break up function into assembly/function */ + lastSlash := strings.LastIndexByte(frame.Function, '/') + if lastSlash < 0 { + // e.g. "runtime.gopanic" + // The below works with lastSlash=0 + lastSlash = 0 + } + + firstDot := strings.IndexByte(frame.Function[lastSlash:], '.') + if firstDot >= 0 { + stackFrame.Assembly = frame.Function[:lastSlash+firstDot] + stackFrame.Method = frame.Function[lastSlash+firstDot+1:] + } + } + + stackFrames = append(stackFrames, stackFrame) + + level++ + if !more { + break + } + } + + return stackFrames +} + +// Recovers from any active panics and tracks them to the specified +// TelemetryClient. If rethrow is set to true, then this will panic. +// Should be invoked via defer in functions to monitor. 
+func TrackPanic(client TelemetryClient, rethrow bool) { + if r := recover(); r != nil { + client.Track(newExceptionTelemetry(r, 1)) + if rethrow { + panic(r) + } + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go new file mode 100644 index 0000000000..a309bed32d --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/inmemorychannel.go @@ -0,0 +1,449 @@ +package appinsights + +import ( + "sync" + "time" + + "code.cloudfoundry.org/clock" + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +var ( + submit_retries = []time.Duration{time.Duration(10 * time.Second), time.Duration(30 * time.Second), time.Duration(60 * time.Second)} +) + +// A telemetry channel that stores events exclusively in memory. Presently +// the only telemetry channel implementation available. +type InMemoryChannel struct { + endpointAddress string + isDeveloperMode bool + collectChan chan *contracts.Envelope + controlChan chan *inMemoryChannelControl + batchSize int + batchInterval time.Duration + waitgroup sync.WaitGroup + throttle *throttleManager + transmitter transmitter +} + +type inMemoryChannelControl struct { + // If true, flush the buffer. + flush bool + + // If true, stop listening on the channel. (Flush is required if any events are to be sent) + stop bool + + // If stopping and flushing, this specifies whether to retry submissions on error. + retry bool + + // If retrying, what is the max time to wait before finishing up? + timeout time.Duration + + // If specified, a message will be sent on this channel when all pending telemetry items have been submitted + callback chan struct{} +} + +// Creates an InMemoryChannel instance and starts a background submission +// goroutine. 
+func NewInMemoryChannel(config *TelemetryConfiguration) *InMemoryChannel { + channel := &InMemoryChannel{ + endpointAddress: config.EndpointUrl, + collectChan: make(chan *contracts.Envelope), + controlChan: make(chan *inMemoryChannelControl), + batchSize: config.MaxBatchSize, + batchInterval: config.MaxBatchInterval, + throttle: newThrottleManager(), + transmitter: newTransmitter(config.EndpointUrl), + } + + go channel.acceptLoop() + + return channel +} + +// The address of the endpoint to which telemetry is sent +func (channel *InMemoryChannel) EndpointAddress() string { + return channel.endpointAddress +} + +// Queues a single telemetry item +func (channel *InMemoryChannel) Send(item *contracts.Envelope) { + if item != nil && channel.collectChan != nil { + channel.collectChan <- item + } +} + +// Forces the current queue to be sent +func (channel *InMemoryChannel) Flush() { + if channel.controlChan != nil { + channel.controlChan <- &inMemoryChannelControl{ + flush: true, + } + } +} + +// Tears down the submission goroutines, closes internal channels. Any +// telemetry waiting to be sent is discarded. Further calls to Send() have +// undefined behavior. This is a more abrupt version of Close(). +func (channel *InMemoryChannel) Stop() { + if channel.controlChan != nil { + channel.controlChan <- &inMemoryChannelControl{ + stop: true, + } + } +} + +// Returns true if this channel has been throttled by the data collector. +func (channel *InMemoryChannel) IsThrottled() bool { + return channel.throttle != nil && channel.throttle.IsThrottled() +} + +// Flushes and tears down the submission goroutine and closes internal +// channels. Returns a channel that is closed when all pending telemetry +// items have been submitted and it is safe to shut down without losing +// telemetry. +// +// If retryTimeout is specified and non-zero, then failed submissions will +// be retried until one succeeds or the timeout expires, whichever occurs +// first. 
A retryTimeout of zero indicates that failed submissions will be +// retried as usual. An omitted retryTimeout indicates that submissions +// should not be retried if they fail. +// +// Note that the returned channel may not be closed before retryTimeout even +// if it is specified. This is because retryTimeout only applies to the +// latest telemetry buffer. This may be typical for applications that +// submit a large amount of telemetry or are prone to being throttled. When +// exiting, you should select on the result channel and your own timer to +// avoid long delays. +func (channel *InMemoryChannel) Close(timeout ...time.Duration) <-chan struct{} { + if channel.controlChan != nil { + callback := make(chan struct{}) + + ctl := &inMemoryChannelControl{ + stop: true, + flush: true, + retry: false, + callback: callback, + } + + if len(timeout) > 0 { + ctl.retry = true + ctl.timeout = timeout[0] + } + + channel.controlChan <- ctl + + return callback + } else { + return nil + } +} + +func (channel *InMemoryChannel) acceptLoop() { + channelState := newInMemoryChannelState(channel) + + for !channelState.stopping { + channelState.start() + } + + channelState.stop() +} + +// Data shared between parts of a channel +type inMemoryChannelState struct { + channel *InMemoryChannel + stopping bool + buffer telemetryBufferItems + retry bool + retryTimeout time.Duration + callback chan struct{} + timer clock.Timer +} + +func newInMemoryChannelState(channel *InMemoryChannel) *inMemoryChannelState { + // Initialize timer to stopped -- avoid any chance of a race condition. + timer := currentClock.NewTimer(time.Hour) + timer.Stop() + + return &inMemoryChannelState{ + channel: channel, + buffer: make(telemetryBufferItems, 0, 16), + stopping: false, + timer: timer, + } +} + +// Part of channel accept loop: Initialize buffer and accept first message, handle controls. 
+func (state *inMemoryChannelState) start() bool { + if len(state.buffer) > 16 { + // Start out with the size of the previous buffer + state.buffer = make(telemetryBufferItems, 0, cap(state.buffer)) + } else if len(state.buffer) > 0 { + // Start out with at least 16 slots + state.buffer = make(telemetryBufferItems, 0, 16) + } + + // Wait for an event + select { + case event := <-state.channel.collectChan: + if event == nil { + // Channel closed? Not intercepted by Send()? + panic("Received nil event") + } + + state.buffer = append(state.buffer, event) + + case ctl := <-state.channel.controlChan: + // The buffer is empty, so there would be no point in flushing + state.channel.signalWhenDone(ctl.callback) + + if ctl.stop { + state.stopping = true + return false + } + } + + if len(state.buffer) == 0 { + return true + } + + return state.waitToSend() +} + +// Part of channel accept loop: Wait for buffer to fill, timeout to expire, or flush +func (state *inMemoryChannelState) waitToSend() bool { + // Things that are used by the sender if we receive a control message + state.retryTimeout = 0 + state.retry = true + state.callback = nil + + // Delay until timeout passes or buffer fills up + state.timer.Reset(state.channel.batchInterval) + + for { + if len(state.buffer) >= state.channel.batchSize { + if !state.timer.Stop() { + <-state.timer.C() + } + + return state.send() + } + + select { + case event := <-state.channel.collectChan: + if event == nil { + // Channel closed? Not intercepted by Send()? + panic("Received nil event") + } + + state.buffer = append(state.buffer, event) + + case ctl := <-state.channel.controlChan: + if ctl.stop { + state.stopping = true + state.retry = ctl.retry + if !ctl.flush { + // No flush? Just exit. 
+ state.channel.signalWhenDone(ctl.callback) + return false + } + } + + if ctl.flush { + if !state.timer.Stop() { + <-state.timer.C() + } + + state.retryTimeout = ctl.timeout + state.callback = ctl.callback + return state.send() + } + + case <-state.timer.C(): + // Timeout expired + return state.send() + } + } +} + +// Part of channel accept loop: Check and wait on throttle, submit pending telemetry +func (state *inMemoryChannelState) send() bool { + // Hold up transmission if we're being throttled + if !state.stopping && state.channel.throttle.IsThrottled() { + if !state.waitThrottle() { + // Stopped + return false + } + } + + // Send + if len(state.buffer) > 0 { + state.channel.waitgroup.Add(1) + + // If we have a callback, wait on the waitgroup now that it's + // incremented. + state.channel.signalWhenDone(state.callback) + + go func(buffer telemetryBufferItems, retry bool, retryTimeout time.Duration) { + defer state.channel.waitgroup.Done() + state.channel.transmitRetry(buffer, retry, retryTimeout) + }(state.buffer, state.retry, state.retryTimeout) + } else if state.callback != nil { + state.channel.signalWhenDone(state.callback) + } + + return true +} + +// Part of channel accept loop: Wait for throttle to expire while dropping messages +func (state *inMemoryChannelState) waitThrottle() bool { + // Channel is currently throttled. Once the buffer fills, messages will + // be lost... If we're exiting, then we'll just try to submit anyway. That + // request may be throttled and transmitRetry will perform the backoff correctly. + + diagnosticsWriter.Write("Channel is throttled, events may be dropped.") + throttleDone := state.channel.throttle.NotifyWhenReady() + dropped := 0 + + defer diagnosticsWriter.Printf("Channel dropped %d events while throttled", dropped) + + for { + select { + case <-throttleDone: + close(throttleDone) + return true + + case event := <-state.channel.collectChan: + // If there's still room in the buffer, then go ahead and add it. 
+ if len(state.buffer) < state.channel.batchSize { + state.buffer = append(state.buffer, event) + } else { + if dropped == 0 { + diagnosticsWriter.Write("Buffer is full, dropping further events.") + } + + dropped++ + } + + case ctl := <-state.channel.controlChan: + if ctl.stop { + state.stopping = true + state.retry = ctl.retry + if !ctl.flush { + state.channel.signalWhenDone(ctl.callback) + return false + } else { + // Make an exception when stopping + return true + } + } + + // Cannot flush + // TODO: Figure out what to do about callback? + if ctl.flush { + state.channel.signalWhenDone(ctl.callback) + } + } + } +} + +// Part of channel accept loop: Clean up and close telemetry channel +func (state *inMemoryChannelState) stop() { + close(state.channel.collectChan) + close(state.channel.controlChan) + + state.channel.collectChan = nil + state.channel.controlChan = nil + + // Throttle can't close until transmitters are done using it. + state.channel.waitgroup.Wait() + state.channel.throttle.Stop() + + state.channel.throttle = nil +} + +func (channel *InMemoryChannel) transmitRetry(items telemetryBufferItems, retry bool, retryTimeout time.Duration) { + payload := items.serialize() + retryTimeRemaining := retryTimeout + + for _, wait := range submit_retries { + result, err := channel.transmitter.Transmit(payload, items) + if err == nil && result != nil && result.IsSuccess() { + return + } + + if !retry { + diagnosticsWriter.Write("Refusing to retry telemetry submission (retry==false)") + return + } + + // Check for success, determine if we need to retry anything + if result != nil { + if result.CanRetry() { + // Filter down to failed items + payload, items = result.GetRetryItems(payload, items) + if len(payload) == 0 || len(items) == 0 { + return + } + } else { + diagnosticsWriter.Write("Cannot retry telemetry submission") + return + } + + // Check for throttling + if result.IsThrottled() { + if result.retryAfter != nil { + diagnosticsWriter.Printf("Channel is 
throttled until %s", *result.retryAfter) + channel.throttle.RetryAfter(*result.retryAfter) + } else { + // TODO: Pick a time + } + } + } + + if retryTimeout > 0 { + // We're on a time schedule here. Make sure we don't try longer + // than we have been allowed. + if retryTimeRemaining < wait { + // One more chance left -- we'll wait the max time we can + // and then retry on the way out. + currentClock.Sleep(retryTimeRemaining) + break + } else { + // Still have time left to go through the rest of the regular + // retry schedule + retryTimeRemaining -= wait + } + } + + diagnosticsWriter.Printf("Waiting %s to retry submission", wait) + currentClock.Sleep(wait) + + // Wait if the channel is throttled and we're not on a schedule + if channel.IsThrottled() && retryTimeout == 0 { + diagnosticsWriter.Printf("Channel is throttled; extending wait time.") + ch := channel.throttle.NotifyWhenReady() + result := <-ch + close(ch) + + if !result { + return + } + } + } + + // One final try + _, err := channel.transmitter.Transmit(payload, items) + if err != nil { + diagnosticsWriter.Write("Gave up transmitting payload; exhausted retries") + } +} + +func (channel *InMemoryChannel) signalWhenDone(callback chan struct{}) { + if callback != nil { + go func() { + channel.waitgroup.Wait() + close(callback) + }() + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go new file mode 100644 index 0000000000..4706cd764d --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/jsonserializer.go @@ -0,0 +1,25 @@ +package appinsights + +import ( + "bytes" + "encoding/json" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +type telemetryBufferItems []*contracts.Envelope + +func (items telemetryBufferItems) serialize() []byte { + var result bytes.Buffer + encoder := json.NewEncoder(&result) + + for _, item := range 
items { + end := result.Len() + if err := encoder.Encode(item); err != nil { + diagnosticsWriter.Printf("Telemetry item failed to serialize: %s", err.Error()) + result.Truncate(end) + } + } + + return result.Bytes() +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go new file mode 100644 index 0000000000..db634ed743 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/package.go @@ -0,0 +1,8 @@ +// Package appinsights provides an interface to submit telemetry to Application Insights. +// See more at https://azure.microsoft.com/en-us/services/application-insights/ +package appinsights + +const ( + sdkName = "go" + Version = "0.4.2" +) diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetry.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetry.go new file mode 100644 index 0000000000..54b88781e7 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetry.go @@ -0,0 +1,652 @@ +package appinsights + +import ( + "fmt" + "math" + "net/url" + "strconv" + "time" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +// Common interface implemented by telemetry data contracts +type TelemetryData interface { + EnvelopeName(string) string + BaseType() string + Sanitize() []string +} + +// Common interface implemented by telemetry items that can be passed to +// TelemetryClient.Track +type Telemetry interface { + // Gets the time when this item was measured + Time() time.Time + + // Sets the timestamp to the specified time. + SetTime(time.Time) + + // Gets context data containing extra, optional tags. Overrides + // values found on client TelemetryContext. + ContextTags() map[string]string + + // Gets the data contract as it will be submitted to the data + // collector. 
+ TelemetryData() TelemetryData + + // Gets custom properties to submit with the telemetry item. + GetProperties() map[string]string + + // Gets custom measurements to submit with the telemetry item. + GetMeasurements() map[string]float64 +} + +// BaseTelemetry is the common base struct for telemetry items. +type BaseTelemetry struct { + // The time this when this item was measured + Timestamp time.Time + + // Custom properties + Properties map[string]string + + // Telemetry Context containing extra, optional tags. + Tags contracts.ContextTags +} + +// BaseTelemetryMeasurements provides the Measurements field for telemetry +// items that support it. +type BaseTelemetryMeasurements struct { + // Custom measurements + Measurements map[string]float64 +} + +// BaseTelemetryNoMeasurements provides no Measurements field for telemetry +// items that omit it. +type BaseTelemetryNoMeasurements struct { +} + +// Time returns the timestamp when this was measured. +func (item *BaseTelemetry) Time() time.Time { + return item.Timestamp +} + +// SetTime sets the timestamp to the specified time. +func (item *BaseTelemetry) SetTime(t time.Time) { + item.Timestamp = t +} + +// Gets context data containing extra, optional tags. Overrides values +// found on client TelemetryContext. +func (item *BaseTelemetry) ContextTags() map[string]string { + return item.Tags +} + +// Gets custom properties to submit with the telemetry item. +func (item *BaseTelemetry) GetProperties() map[string]string { + return item.Properties +} + +// Gets custom measurements to submit with the telemetry item. +func (item *BaseTelemetryMeasurements) GetMeasurements() map[string]float64 { + return item.Measurements +} + +// GetMeasurements returns nil for telemetry items that do not support measurements. +func (item *BaseTelemetryNoMeasurements) GetMeasurements() map[string]float64 { + return nil +} + +// Trace telemetry items represent printf-like trace statements that can be +// text searched. 
+type TraceTelemetry struct { + BaseTelemetry + BaseTelemetryNoMeasurements + + // Trace message + Message string + + // Severity level + SeverityLevel contracts.SeverityLevel +} + +// Creates a trace telemetry item with the specified message and severity +// level. +func NewTraceTelemetry(message string, severityLevel contracts.SeverityLevel) *TraceTelemetry { + return &TraceTelemetry{ + Message: message, + SeverityLevel: severityLevel, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + } +} + +func (trace *TraceTelemetry) TelemetryData() TelemetryData { + data := contracts.NewMessageData() + data.Message = trace.Message + data.Properties = trace.Properties + data.SeverityLevel = trace.SeverityLevel + + return data +} + +// Event telemetry items represent structured event records. +type EventTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Event name + Name string +} + +// Creates an event telemetry item with the specified name. +func NewEventTelemetry(name string) *EventTelemetry { + return &EventTelemetry{ + Name: name, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +func (event *EventTelemetry) TelemetryData() TelemetryData { + data := contracts.NewEventData() + data.Name = event.Name + data.Properties = event.Properties + data.Measurements = event.Measurements + + return data +} + +// Metric telemetry items each represent a single data point. +type MetricTelemetry struct { + BaseTelemetry + BaseTelemetryNoMeasurements + + // Metric name + Name string + + // Sampled value + Value float64 +} + +// Creates a metric telemetry sample with the specified name and value. 
+func NewMetricTelemetry(name string, value float64) *MetricTelemetry { + return &MetricTelemetry{ + Name: name, + Value: value, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + } +} + +func (metric *MetricTelemetry) TelemetryData() TelemetryData { + dataPoint := contracts.NewDataPoint() + dataPoint.Name = metric.Name + dataPoint.Value = metric.Value + dataPoint.Count = 1 + dataPoint.Kind = contracts.Measurement + + data := contracts.NewMetricData() + data.Metrics = []*contracts.DataPoint{dataPoint} + data.Properties = metric.Properties + + return data +} + +// Aggregated metric telemetry items represent an aggregation of data points +// over time. These values can be calculated by the caller or with the AddData +// function. +type AggregateMetricTelemetry struct { + BaseTelemetry + BaseTelemetryNoMeasurements + + // Metric name + Name string + + // Sum of individual measurements + Value float64 + + // Minimum value of the aggregated metric + Min float64 + + // Maximum value of the aggregated metric + Max float64 + + // Count of measurements in the sample + Count int + + // Standard deviation of the aggregated metric + StdDev float64 + + // Variance of the aggregated metric. As an invariant, + // either this or the StdDev should be zero at any given time. + // If both are non-zero then StdDev takes precedence. + Variance float64 +} + +// Creates a new aggregated metric telemetry item with the specified name. +// Values should be set on the object returned before submission. +func NewAggregateMetricTelemetry(name string) *AggregateMetricTelemetry { + return &AggregateMetricTelemetry{ + Name: name, + Count: 0, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + } +} + +// Adds data points to the aggregate totals included in this telemetry item. 
+// This can be used for all the data at once or incrementally. Calculates +// Min, Max, Sum, Count, and StdDev (by way of Variance). +func (agg *AggregateMetricTelemetry) AddData(values []float64) { + if agg.StdDev != 0.0 { + // If StdDev is non-zero, then square it to produce + // the variance, which is better for incremental calculations, + // and then zero it out. + agg.Variance = agg.StdDev * agg.StdDev + agg.StdDev = 0.0 + } + + vsum := agg.addData(values, agg.Variance*float64(agg.Count)) + if agg.Count > 0 { + agg.Variance = vsum / float64(agg.Count) + } +} + +// Adds sampled data points to the aggregate totals included in this telemetry item. +// This can be used for all the data at once or incrementally. Differs from AddData +// in how it calculates standard deviation, and should not be used interchangeably +// with AddData. +func (agg *AggregateMetricTelemetry) AddSampledData(values []float64) { + if agg.StdDev != 0.0 { + // If StdDev is non-zero, then square it to produce + // the variance, which is better for incremental calculations, + // and then zero it out. + agg.Variance = agg.StdDev * agg.StdDev + agg.StdDev = 0.0 + } + + vsum := agg.addData(values, agg.Variance*float64(agg.Count-1)) + if agg.Count > 1 { + // Sampled values should divide by n-1 + agg.Variance = vsum / float64(agg.Count-1) + } +} + +func (agg *AggregateMetricTelemetry) addData(values []float64, vsum float64) float64 { + if len(values) == 0 { + return vsum + } + + // Running tally of the mean is important for incremental variance computation. + var mean float64 + + if agg.Count == 0 { + agg.Min = values[0] + agg.Max = values[0] + } else { + mean = agg.Value / float64(agg.Count) + } + + for _, x := range values { + // Update Min, Max, Count, and Value + agg.Count++ + agg.Value += x + + if x < agg.Min { + agg.Min = x + } + + if x > agg.Max { + agg.Max = x + } + + // Welford's algorithm to compute variance. The divide occurs in the caller. 
+ newMean := agg.Value / float64(agg.Count) + vsum += (x - mean) * (x - newMean) + mean = newMean + } + + return vsum +} + +func (agg *AggregateMetricTelemetry) TelemetryData() TelemetryData { + dataPoint := contracts.NewDataPoint() + dataPoint.Name = agg.Name + dataPoint.Value = agg.Value + dataPoint.Kind = contracts.Aggregation + dataPoint.Min = agg.Min + dataPoint.Max = agg.Max + dataPoint.Count = agg.Count + + if agg.StdDev != 0.0 { + dataPoint.StdDev = agg.StdDev + } else if agg.Variance > 0.0 { + dataPoint.StdDev = math.Sqrt(agg.Variance) + } + + data := contracts.NewMetricData() + data.Metrics = []*contracts.DataPoint{dataPoint} + data.Properties = agg.Properties + + return data +} + +// Request telemetry items represents completion of an external request to the +// application and contains a summary of that request execution and results. +type RequestTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Identifier of a request call instance. Used for correlation between request + // and other telemetry items. + Id string + + // Request name. For HTTP requests it represents the HTTP method and URL path template. + Name string + + // URL of the request with all query string parameters. + Url string + + // Duration to serve the request. + Duration time.Duration + + // Results of a request execution. HTTP status code for HTTP requests. + ResponseCode string + + // Indication of successful or unsuccessful call. + Success bool + + // Source of the request. Examplese are the instrumentation key of the caller + // or the ip address of the caller. + Source string +} + +// Creates a new request telemetry item for HTTP requests. The success value will be +// computed from responseCode, and the timestamp will be set to the current time minus +// the duration. 
+func NewRequestTelemetry(method, uri string, duration time.Duration, responseCode string) *RequestTelemetry { + success := true + code, err := strconv.Atoi(responseCode) + if err == nil { + success = code < 400 || code == 401 + } + + nameUri := uri + + // Sanitize URL for the request name + if parsedUrl, err := url.Parse(uri); err == nil { + // Remove the query + parsedUrl.RawQuery = "" + parsedUrl.ForceQuery = false + + // Remove the fragment + parsedUrl.Fragment = "" + + // Remove the user info, if any. + parsedUrl.User = nil + + // Write back to name + nameUri = parsedUrl.String() + } + + return &RequestTelemetry{ + Name: fmt.Sprintf("%s %s", method, nameUri), + Url: uri, + Id: newUUID().String(), + Duration: duration, + ResponseCode: responseCode, + Success: success, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now().Add(-duration), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +// Sets the timestamp and duration of this telemetry item based on the provided +// start and end times. +func (request *RequestTelemetry) MarkTime(startTime, endTime time.Time) { + request.Timestamp = startTime + request.Duration = endTime.Sub(startTime) +} + +func (request *RequestTelemetry) TelemetryData() TelemetryData { + data := contracts.NewRequestData() + data.Name = request.Name + data.Duration = formatDuration(request.Duration) + data.ResponseCode = request.ResponseCode + data.Success = request.Success + data.Url = request.Url + data.Source = request.Source + + if request.Id == "" { + data.Id = newUUID().String() + } else { + data.Id = request.Id + } + + data.Properties = request.Properties + data.Measurements = request.Measurements + return data +} + +// Remote dependency telemetry items represent interactions of the monitored +// component with a remote component/service like SQL or an HTTP endpoint. 
+type RemoteDependencyTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Name of the command that initiated this dependency call. Low cardinality + // value. Examples are stored procedure name and URL path template. + Name string + + // Identifier of a dependency call instance. Used for correlation with the + // request telemetry item corresponding to this dependency call. + Id string + + // Result code of a dependency call. Examples are SQL error code and HTTP + // status code. + ResultCode string + + // Duration of the remote call. + Duration time.Duration + + // Indication of successful or unsuccessful call. + Success bool + + // Command initiated by this dependency call. Examples are SQL statement and + // HTTP URL's with all the query parameters. + Data string + + // Dependency type name. Very low cardinality. Examples are SQL, Azure table, + // and HTTP. + Type string + + // Target site of a dependency call. Examples are server name, host address. + Target string +} + +// Builds a new Remote Dependency telemetry item, with the specified name, +// dependency type, target site, and success status. +func NewRemoteDependencyTelemetry(name, dependencyType, target string, success bool) *RemoteDependencyTelemetry { + return &RemoteDependencyTelemetry{ + Name: name, + Type: dependencyType, + Target: target, + Success: success, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +// Sets the timestamp and duration of this telemetry item based on the provided +// start and end times. 
+func (telem *RemoteDependencyTelemetry) MarkTime(startTime, endTime time.Time) { + telem.Timestamp = startTime + telem.Duration = endTime.Sub(startTime) +} + +func (telem *RemoteDependencyTelemetry) TelemetryData() TelemetryData { + data := contracts.NewRemoteDependencyData() + data.Name = telem.Name + data.Id = telem.Id + data.ResultCode = telem.ResultCode + data.Duration = formatDuration(telem.Duration) + data.Success = telem.Success + data.Data = telem.Data + data.Target = telem.Target + data.Properties = telem.Properties + data.Measurements = telem.Measurements + data.Type = telem.Type + + return data +} + +// Avaibility telemetry items represent the result of executing an availability +// test. +type AvailabilityTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Identifier of a test run. Used to correlate steps of test run and + // telemetry generated by the service. + Id string + + // Name of the test that this result represents. + Name string + + // Duration of the test run. + Duration time.Duration + + // Success flag. + Success bool + + // Name of the location where the test was run. + RunLocation string + + // Diagnostic message for the result. + Message string +} + +// Creates a new availability telemetry item with the specified test name, +// duration and success code. +func NewAvailabilityTelemetry(name string, duration time.Duration, success bool) *AvailabilityTelemetry { + return &AvailabilityTelemetry{ + Name: name, + Duration: duration, + Success: success, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +// Sets the timestamp and duration of this telemetry item based on the provided +// start and end times. 
+func (telem *AvailabilityTelemetry) MarkTime(startTime, endTime time.Time) { + telem.Timestamp = startTime + telem.Duration = endTime.Sub(startTime) +} + +func (telem *AvailabilityTelemetry) TelemetryData() TelemetryData { + data := contracts.NewAvailabilityData() + data.Name = telem.Name + data.Duration = formatDuration(telem.Duration) + data.Success = telem.Success + data.RunLocation = telem.RunLocation + data.Message = telem.Message + data.Properties = telem.Properties + data.Id = telem.Id + data.Measurements = telem.Measurements + + return data +} + +// Page view telemetry items represent generic actions on a page like a button +// click. +type PageViewTelemetry struct { + BaseTelemetry + BaseTelemetryMeasurements + + // Request URL with all query string parameters + Url string + + // Request duration. + Duration time.Duration + + // Event name. + Name string +} + +// Creates a new page view telemetry item with the specified name and url. +func NewPageViewTelemetry(name, url string) *PageViewTelemetry { + return &PageViewTelemetry{ + Name: name, + Url: url, + BaseTelemetry: BaseTelemetry{ + Timestamp: currentClock.Now(), + Tags: make(contracts.ContextTags), + Properties: make(map[string]string), + }, + BaseTelemetryMeasurements: BaseTelemetryMeasurements{ + Measurements: make(map[string]float64), + }, + } +} + +// Sets the timestamp and duration of this telemetry item based on the provided +// start and end times. 
+func (telem *PageViewTelemetry) MarkTime(startTime, endTime time.Time) { + telem.Timestamp = startTime + telem.Duration = endTime.Sub(startTime) +} + +func (telem *PageViewTelemetry) TelemetryData() TelemetryData { + data := contracts.NewPageViewData() + data.Url = telem.Url + data.Duration = formatDuration(telem.Duration) + data.Name = telem.Name + data.Properties = telem.Properties + data.Measurements = telem.Measurements + return data +} + +func formatDuration(d time.Duration) string { + ticks := int64(d/(time.Nanosecond*100)) % 10000000 + seconds := int64(d/time.Second) % 60 + minutes := int64(d/time.Minute) % 60 + hours := int64(d/time.Hour) % 24 + days := int64(d / (time.Hour * 24)) + + return fmt.Sprintf("%d.%02d:%02d:%02d.%07d", days, hours, minutes, seconds, ticks) +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go new file mode 100644 index 0000000000..c539d4ebd6 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrychannel.go @@ -0,0 +1,50 @@ +package appinsights + +import ( + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" + "time" +) + +// Implementations of TelemetryChannel are responsible for queueing and +// periodically submitting telemetry items. +type TelemetryChannel interface { + // The address of the endpoint to which telemetry is sent + EndpointAddress() string + + // Queues a single telemetry item + Send(*contracts.Envelope) + + // Forces the current queue to be sent + Flush() + + // Tears down the submission goroutines, closes internal channels. + // Any telemetry waiting to be sent is discarded. Further calls to + // Send() have undefined behavior. This is a more abrupt version of + // Close(). + Stop() + + // Returns true if this channel has been throttled by the data + // collector. 
+ IsThrottled() bool + + // Flushes and tears down the submission goroutine and closes + // internal channels. Returns a channel that is closed when all + // pending telemetry items have been submitted and it is safe to + // shut down without losing telemetry. + // + // If retryTimeout is specified and non-zero, then failed + // submissions will be retried until one succeeds or the timeout + // expires, whichever occurs first. A retryTimeout of zero + // indicates that failed submissions will be retried as usual. An + // omitted retryTimeout indicates that submissions should not be + // retried if they fail. + // + // Note that the returned channel may not be closed before + // retryTimeout even if it is specified. This is because + // retryTimeout only applies to the latest telemetry buffer. This + // may be typical for applications that submit a large amount of + // telemetry or are prone to being throttled. When exiting, you + // should select on the result channel and your own timer to avoid + // long delays. + Close(retryTimeout ...time.Duration) <-chan struct{} +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go new file mode 100644 index 0000000000..f54e36d146 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/telemetrycontext.go @@ -0,0 +1,104 @@ +package appinsights + +import ( + "strings" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +// Encapsulates contextual data common to all telemetry submitted through a +// TelemetryClient instance such as including instrumentation key, tags, and +// common properties. +type TelemetryContext struct { + // Instrumentation key + iKey string + + // Stripped-down instrumentation key used in envelope name + nameIKey string + + // Collection of tag data to attach to the telemetry item. 
+ Tags contracts.ContextTags + + // Common properties to add to each telemetry item. This only has + // an effect from the TelemetryClient's context instance. This will + // be nil on telemetry items. + CommonProperties map[string]string +} + +// Creates a new, empty TelemetryContext +func NewTelemetryContext(ikey string) *TelemetryContext { + return &TelemetryContext{ + iKey: ikey, + nameIKey: strings.Replace(ikey, "-", "", -1), + Tags: make(contracts.ContextTags), + CommonProperties: make(map[string]string), + } +} + +// Gets the instrumentation key associated with this TelemetryContext. This +// will be an empty string on telemetry items' context instances. +func (context *TelemetryContext) InstrumentationKey() string { + return context.iKey +} + +// Wraps a telemetry item in an envelope with the information found in this +// context. +func (context *TelemetryContext) envelop(item Telemetry) *contracts.Envelope { + // Apply common properties + if props := item.GetProperties(); props != nil && context.CommonProperties != nil { + for k, v := range context.CommonProperties { + if _, ok := props[k]; !ok { + props[k] = v + } + } + } + + tdata := item.TelemetryData() + data := contracts.NewData() + data.BaseType = tdata.BaseType() + data.BaseData = tdata + + envelope := contracts.NewEnvelope() + envelope.Name = tdata.EnvelopeName(context.nameIKey) + envelope.Data = data + envelope.IKey = context.iKey + + timestamp := item.Time() + if timestamp.IsZero() { + timestamp = currentClock.Now() + } + + envelope.Time = timestamp.UTC().Format("2006-01-02T15:04:05.999999Z") + + if contextTags := item.ContextTags(); contextTags != nil { + envelope.Tags = contextTags + + // Copy in default tag values. 
+ for tagkey, tagval := range context.Tags { + if _, ok := contextTags[tagkey]; !ok { + contextTags[tagkey] = tagval + } + } + } else { + // Create new tags object + envelope.Tags = make(map[string]string) + for k, v := range context.Tags { + envelope.Tags[k] = v + } + } + + // Create operation ID if it does not exist + if _, ok := envelope.Tags[contracts.OperationId]; !ok { + envelope.Tags[contracts.OperationId] = newUUID().String() + } + + // Sanitize. + for _, warn := range tdata.Sanitize() { + diagnosticsWriter.Printf("Telemetry data warning: %s", warn) + } + for _, warn := range contracts.SanitizeTags(envelope.Tags) { + diagnosticsWriter.Printf("Telemetry tag warning: %s", warn) + } + + return envelope +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go new file mode 100644 index 0000000000..2c85800d14 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/throttle.go @@ -0,0 +1,144 @@ +package appinsights + +import ( + "time" +) + +type throttleManager struct { + msgs chan *throttleMessage +} + +type throttleMessage struct { + query bool + wait bool + throttle bool + stop bool + timestamp time.Time + result chan bool +} + +func newThrottleManager() *throttleManager { + result := &throttleManager{ + msgs: make(chan *throttleMessage), + } + + go result.run() + return result +} + +func (throttle *throttleManager) RetryAfter(t time.Time) { + throttle.msgs <- &throttleMessage{ + throttle: true, + timestamp: t, + } +} + +func (throttle *throttleManager) IsThrottled() bool { + ch := make(chan bool) + throttle.msgs <- &throttleMessage{ + query: true, + result: ch, + } + + result := <-ch + close(ch) + return result +} + +func (throttle *throttleManager) NotifyWhenReady() chan bool { + result := make(chan bool, 1) + throttle.msgs <- &throttleMessage{ + wait: true, + result: result, + } + + return result +} + +func (throttle 
*throttleManager) Stop() { + result := make(chan bool) + throttle.msgs <- &throttleMessage{ + stop: true, + result: result, + } + + <-result + close(result) +} + +func (throttle *throttleManager) run() { + for { + throttledUntil, ok := throttle.waitForThrottle() + if !ok { + break + } + + if !throttle.waitForReady(throttledUntil) { + break + } + } + + close(throttle.msgs) +} + +func (throttle *throttleManager) waitForThrottle() (time.Time, bool) { + for { + msg := <-throttle.msgs + if msg.query { + msg.result <- false + } else if msg.wait { + msg.result <- true + } else if msg.stop { + return time.Time{}, false + } else if msg.throttle { + return msg.timestamp, true + } + } +} + +func (throttle *throttleManager) waitForReady(throttledUntil time.Time) bool { + duration := throttledUntil.Sub(currentClock.Now()) + if duration <= 0 { + return true + } + + var notify []chan bool + + // --- Throttled and waiting --- + t := currentClock.NewTimer(duration) + + for { + select { + case <-t.C(): + for _, n := range notify { + n <- true + } + + return true + case msg := <-throttle.msgs: + if msg.query { + msg.result <- true + } else if msg.wait { + notify = append(notify, msg.result) + } else if msg.stop { + for _, n := range notify { + n <- false + } + + msg.result <- true + + return false + } else if msg.throttle { + if msg.timestamp.After(throttledUntil) { + throttledUntil = msg.timestamp + + if !t.Stop() { + <-t.C() + } + + t.Reset(throttledUntil.Sub(currentClock.Now())) + } + } + } + } +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go new file mode 100644 index 0000000000..beee8dbb25 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/transmitter.go @@ -0,0 +1,237 @@ +package appinsights + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "io/ioutil" + "net/http" + "sort" + "time" +) + +type transmitter 
interface { + Transmit(payload []byte, items telemetryBufferItems) (*transmissionResult, error) +} + +type httpTransmitter struct { + endpoint string +} + +type transmissionResult struct { + statusCode int + retryAfter *time.Time + response *backendResponse +} + +// Structures returned by data collector +type backendResponse struct { + ItemsReceived int `json:"itemsReceived"` + ItemsAccepted int `json:"itemsAccepted"` + Errors itemTransmissionResults `json:"errors"` +} + +// This needs to be its own type because it implements sort.Interface +type itemTransmissionResults []*itemTransmissionResult + +type itemTransmissionResult struct { + Index int `json:"index"` + StatusCode int `json:"statusCode"` + Message string `json:"message"` +} + +const ( + successResponse = 200 + partialSuccessResponse = 206 + requestTimeoutResponse = 408 + tooManyRequestsResponse = 429 + tooManyRequestsOverExtendedTimeResponse = 439 + errorResponse = 500 + serviceUnavailableResponse = 503 +) + +func newTransmitter(endpointAddress string) transmitter { + return &httpTransmitter{endpointAddress} +} + +func (transmitter *httpTransmitter) Transmit(payload []byte, items telemetryBufferItems) (*transmissionResult, error) { + diagnosticsWriter.Printf("--------- Transmitting %d items ---------", len(items)) + startTime := time.Now() + + // Compress the payload + var postBody bytes.Buffer + gzipWriter := gzip.NewWriter(&postBody) + if _, err := gzipWriter.Write(payload); err != nil { + diagnosticsWriter.Printf("Failed to compress the payload: %s", err.Error()) + gzipWriter.Close() + return nil, err + } + + gzipWriter.Close() + + req, err := http.NewRequest("POST", transmitter.endpoint, &postBody) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("Content-Type", "application/x-json-stream") + req.Header.Set("Accept-Encoding", "gzip, deflate") + + client := http.DefaultClient + resp, err := client.Do(req) + if err != nil { + 
diagnosticsWriter.Printf("Failed to transmit telemetry: %s", err.Error()) + return nil, err + } + + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + diagnosticsWriter.Printf("Failed to read response from server: %s", err.Error()) + return nil, err + } + + duration := time.Since(startTime) + + result := &transmissionResult{statusCode: resp.StatusCode} + + // Grab Retry-After header + if retryAfterValue, ok := resp.Header[http.CanonicalHeaderKey("Retry-After")]; ok && len(retryAfterValue) == 1 { + if retryAfterTime, err := time.Parse(time.RFC1123, retryAfterValue[0]); err == nil { + result.retryAfter = &retryAfterTime + } + } + + // Parse body, if possible + response := &backendResponse{} + if err := json.Unmarshal(body, &response); err == nil { + result.response = response + } + + // Write diagnostics + if diagnosticsWriter.hasListeners() { + diagnosticsWriter.Printf("Telemetry transmitted in %s", duration) + diagnosticsWriter.Printf("Response: %d", result.statusCode) + if result.response != nil { + diagnosticsWriter.Printf("Items accepted/received: %d/%d", result.response.ItemsAccepted, result.response.ItemsReceived) + if len(result.response.Errors) > 0 { + diagnosticsWriter.Printf("Errors:") + for _, err := range result.response.Errors { + if err.Index < len(items) { + diagnosticsWriter.Printf("#%d - %d %s", err.Index, err.StatusCode, err.Message) + diagnosticsWriter.Printf("Telemetry item:\n\t%s", string(items[err.Index:err.Index+1].serialize())) + } + } + } + } + } + + return result, nil +} + +func (result *transmissionResult) IsSuccess() bool { + return result.statusCode == successResponse || + // Partial response but all items accepted + (result.statusCode == partialSuccessResponse && + result.response != nil && + result.response.ItemsReceived == result.response.ItemsAccepted) +} + +func (result *transmissionResult) IsFailure() bool { + return result.statusCode != successResponse && result.statusCode != 
partialSuccessResponse +} + +func (result *transmissionResult) CanRetry() bool { + if result.IsSuccess() { + return false + } + + return result.statusCode == partialSuccessResponse || + result.retryAfter != nil || + (result.statusCode == requestTimeoutResponse || + result.statusCode == serviceUnavailableResponse || + result.statusCode == errorResponse || + result.statusCode == tooManyRequestsResponse || + result.statusCode == tooManyRequestsOverExtendedTimeResponse) +} + +func (result *transmissionResult) IsPartialSuccess() bool { + return result.statusCode == partialSuccessResponse && + result.response != nil && + result.response.ItemsReceived != result.response.ItemsAccepted +} + +func (result *transmissionResult) IsThrottled() bool { + return result.statusCode == tooManyRequestsResponse || + result.statusCode == tooManyRequestsOverExtendedTimeResponse || + result.retryAfter != nil +} + +func (result *itemTransmissionResult) CanRetry() bool { + return result.StatusCode == requestTimeoutResponse || + result.StatusCode == serviceUnavailableResponse || + result.StatusCode == errorResponse || + result.StatusCode == tooManyRequestsResponse || + result.StatusCode == tooManyRequestsOverExtendedTimeResponse +} + +func (result *transmissionResult) GetRetryItems(payload []byte, items telemetryBufferItems) ([]byte, telemetryBufferItems) { + if result.statusCode == partialSuccessResponse && result.response != nil { + // Make sure errors are ordered by index + sort.Sort(result.response.Errors) + + var resultPayload bytes.Buffer + resultItems := make(telemetryBufferItems, 0) + ptr := 0 + idx := 0 + + // Find each retryable error + for _, responseResult := range result.response.Errors { + if responseResult.CanRetry() { + // Advance ptr to start of desired line + for ; idx < responseResult.Index && ptr < len(payload); ptr++ { + if payload[ptr] == '\n' { + idx++ + } + } + + startPtr := ptr + + // Read to end of line + for ; idx == responseResult.Index && ptr < len(payload); ptr++ 
{ + if payload[ptr] == '\n' { + idx++ + } + } + + // Copy item into output buffer + resultPayload.Write(payload[startPtr:ptr]) + resultItems = append(resultItems, items[responseResult.Index]) + } + } + + return resultPayload.Bytes(), resultItems + } else if result.CanRetry() { + return payload, items + } else { + return payload[:0], items[:0] + } +} + +// sort.Interface implementation for Errors[] list + +func (results itemTransmissionResults) Len() int { + return len(results) +} + +func (results itemTransmissionResults) Less(i, j int) bool { + return results[i].Index < results[j].Index +} + +func (results itemTransmissionResults) Swap(i, j int) { + tmp := results[i] + results[i] = results[j] + results[j] = tmp +} diff --git a/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/uuid.go b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/uuid.go new file mode 100644 index 0000000000..6e433ce9e8 --- /dev/null +++ b/vendor/github.com/Microsoft/ApplicationInsights-Go/appinsights/uuid.go @@ -0,0 +1,71 @@ +package appinsights + +import ( + crand "crypto/rand" + "encoding/binary" + "io" + "math/rand" + "sync" + "time" + + "github.com/satori/go.uuid" +) + +// uuidGenerator is a wrapper for satori/go.uuid, used for a few reasons: +// - Avoids build failures due to version differences when a project imports us but +// does not respect our vendoring. (satori/go.uuid#77, #71, #66, ...) +// - Avoids error output when creaing new UUID's: if the crypto reader fails, +// this will fallback on the standard library PRNG, since this is never used +// for a sensitive application. +// - Uses io.ReadFull to guarantee fully-populated UUID's (satori/go.uuid#73) +type uuidGenerator struct { + sync.Mutex + fallbackRand *rand.Rand + reader io.Reader +} + +var uuidgen *uuidGenerator = newUuidGenerator(crand.Reader) + +// newUuidGenerator creates a new uuiGenerator with the specified crypto random reader. 
+func newUuidGenerator(reader io.Reader) *uuidGenerator {
+	// Setup seed for fallback random generator
+	var seed int64
+	b := make([]byte, 8)
+	if _, err := io.ReadFull(reader, b); err == nil {
+		seed = int64(binary.BigEndian.Uint64(b))
+	} else {
+		// Otherwise just use the timestamp
+		seed = time.Now().UTC().UnixNano()
+	}
+
+	return &uuidGenerator{
+		reader:       reader,
+		fallbackRand: rand.New(rand.NewSource(seed)),
+	}
+}
+
+// newUUID generates a new V4 UUID
+func (gen *uuidGenerator) newUUID() uuid.UUID {
+	u := uuid.UUID{}
+	if _, err := io.ReadFull(gen.reader, u[:]); err != nil {
+		gen.fallback(&u)
+	}
+
+	u.SetVersion(uuid.V4)
+	u.SetVariant(uuid.VariantRFC4122)
+	return u
+}
+
+// fallback populates the specified UUID with the standard library's PRNG
+func (gen *uuidGenerator) fallback(u *uuid.UUID) {
+	gen.Lock()
+	defer gen.Unlock()
+
+	// This does not fail as per documentation
+	gen.fallbackRand.Read(u[:])
+}
+
+// newUUID generates a new V4 UUID
+func newUUID() uuid.UUID {
+	return uuidgen.newUUID()
+}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
new file mode 100644
index 0000000000..20dd53b8d3
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+sudo: false
+go:
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - 1.8
+  - 1.9
+  - tip
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci
+notifications:
+  email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 0000000000..926d549870
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated
documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md new file mode 100644 index 0000000000..7b1a722dff --- /dev/null +++ b/vendor/github.com/satori/go.uuid/README.md @@ -0,0 +1,65 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. + +With 100% test coverage and benchmarks out of box. 
+ +Supported versions: +* Version 1, based on timestamp and MAC address (RFC 4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing (RFC 4122) +* Version 4, based on random numbers (RFC 4122) +* Version 5, based on SHA-1 hashing (RFC 4122) + +## Installation + +Use the `go` command: + + $ go get github.com/satori/go.uuid + +## Requirements + +UUID package requires Go >= 1.2. + +## Example + +```go +package main + +import ( + "fmt" + "github.com/satori/go.uuid" +) + +func main() { + // Creating UUID Version 4 + u1 := uuid.NewV4() + fmt.Printf("UUIDv4: %s\n", u1) + + // Parsing UUID from string input + u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Printf("Something gone wrong: %s", err) + } + fmt.Printf("Successfully parsed: %s", u2) +} +``` + +## Documentation + +[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. + +## Links +* [RFC 4122](http://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) + +## Copyright + +Copyright (C) 2013-2018 by Maxim Bublis . + +UUID package released under MIT License. +See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. 
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go new file mode 100644 index 0000000000..656892c53e --- /dev/null +++ b/vendor/github.com/satori/go.uuid/codec.go @@ -0,0 +1,206 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. 
+// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// ABNF for supported UUID text representation follows: +// uuid := canonical | hashlike | braced | urn +// plain := canonical | hashlike +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hashlike := 12hexoct +// braced := '{' plain '}' +// urn := URN ':' UUID-NID ':' plain +// URN := 'urn' +// UUID-NID := 'uuid' +// 12hexoct := 6hexoct 6hexoct +// 6hexoct := 4hexoct 2hexoct +// 4hexoct := 2hexoct 2hexoct +// 2hexoct := hexoct hexoct +// hexoct := hexdig hexdig +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +func (u *UUID) UnmarshalText(text []byte) (err error) { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 36: + return u.decodeCanonical(text) + case 38: + return u.decodeBraced(text) + case 41: + fallthrough + case 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length: %s", text) + } +} + +// decodeCanonical decodes UUID string in format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
+func (u *UUID) decodeCanonical(t []byte) (err error) { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + src := t[:] + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return +} + +// decodeHashLike decodes UUID string in format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) (err error) { + src := t[:] + dst := u[:] + + if _, err = hex.Decode(dst, src); err != nil { + return err + } + return +} + +// decodeBraced decodes UUID string in format +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) (err error) { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID string in format +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) (err error) { + total := len(t) + + urn_uuid_prefix := t[:9] + + if !bytes.Equal(urn_uuid_prefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format: %s", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID string in canonical format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) (err error) { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrrect UUID length: %s", t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. 
+func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != Size { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go new file mode 100644 index 0000000000..3f2f1da2dc --- /dev/null +++ b/vendor/github.com/satori/go.uuid/generator.go @@ -0,0 +1,239 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "hash" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +var ( + global = newDefaultGenerator() + + epochFunc = unixTimeFunc + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + return global.NewV1() +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + return global.NewV2(domain) +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return global.NewV3(ns, name) +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + return global.NewV4() +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return global.NewV5(ns, name) +} + +// Generator provides interface for generating UUIDs. +type Generator interface { + NewV1() UUID + NewV2(domain byte) UUID + NewV3(ns UUID, name string) UUID + NewV4() UUID + NewV5(ns UUID, name string) UUID +} + +// Default generator implementation. +type generator struct { + storageOnce sync.Once + storageMutex sync.Mutex + + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +func newDefaultGenerator() Generator { + return &generator{} +} + +// NewV1 returns UUID based on current timestamp and MAC address. 
+func (g *generator) NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func (g *generator) NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func (g *generator) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns random generated UUID. +func (g *generator) NewV4() UUID { + u := UUID{} + g.safeRandom(u[:]) + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. 
+func (g *generator) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +func (g *generator) initStorage() { + g.initClockSequence() + g.initHardwareAddr() +} + +func (g *generator) initClockSequence() { + buf := make([]byte, 2) + g.safeRandom(buf) + g.clockSequence = binary.BigEndian.Uint16(buf) +} + +func (g *generator) initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(g.hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + g.safeRandom(g.hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + g.hardwareAddr[0] |= 0x01 +} + +func (g *generator) safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func (g *generator) getStorage() (uint64, uint16, []byte) { + g.storageOnce.Do(g.initStorage) + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, g.hardwareAddr[:] +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// Returns UUID based on hashing of namespace UUID and name. 
+func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go new file mode 100644 index 0000000000..56759d3905 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/sql.go @@ -0,0 +1,78 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. 
+func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go new file mode 100644 index 0000000000..a2b8e2ca2a --- /dev/null +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -0,0 +1,161 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementation of Universally Unique Identifier (UUID). +// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and +// version 2 (as specified in DCE 1.1). +package uuid + +import ( + "bytes" + "encoding/hex" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [Size]byte + +// UUID versions +const ( + _ byte = iota + V1 + V2 + V3 + V4 + V5 +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is special form of UUID that is specified to have all +// 128 bits set to zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// Equal returns true if u1 and u2 equals, otherwise returns false. +func Equal(u1 UUID, u2 UUID) bool { + return bytes.Equal(u1[:], u2[:]) +} + +// Version returns algorithm version used to generate UUID. 
+func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns UUID layout variant. +func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns bytes slice representation of UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +}