Vendor aws-sdk-go (dep ensure) (#178)
8980  vendor/github.com/aws/aws-sdk-go/service/machinelearning/api.go  (generated, vendored, normal file)
File diff suppressed because it is too large
33  vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations.go  (generated, vendored, normal file)
@@ -0,0 +1,33 @@
package machinelearning

import (
    "net/url"

    "github.com/aws/aws-sdk-go/aws/request"
)

func init() {
    initRequest = func(r *request.Request) {
        switch r.Operation.Name {
        case opPredict:
            r.Handlers.Build.PushBack(updatePredictEndpoint)
        }
    }
}

// updatePredictEndpoint rewrites the request endpoint to use the
// "PredictEndpoint" parameter of the Predict operation.
func updatePredictEndpoint(r *request.Request) {
    if !r.ParamsFilled() {
        return
    }

    r.ClientInfo.Endpoint = *r.Params.(*PredictInput).PredictEndpoint

    uri, err := url.Parse(r.ClientInfo.Endpoint)
    if err != nil {
        r.Error = err
        return
    }
    r.HTTPRequest.URL = uri
}
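The customization above makes the real-time Predict operation target the endpoint carried in the request's own PredictEndpoint parameter instead of the regional service endpoint. A minimal sketch of how a caller would exercise it follows (not part of the vendored diff; the region, model ID, endpoint URL, and record values are hypothetical placeholders):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
    svc := machinelearning.New(session.Must(session.NewSession(
        aws.NewConfig().WithRegion("us-east-1"), // assumed region
    )))

    // Because of the Build handler registered in init() above, the request URL
    // is rewritten to the PredictEndpoint value before the request is sent.
    out, err := svc.Predict(&machinelearning.PredictInput{
        MLModelId:       aws.String("ml-example-model-id"),                                      // hypothetical
        PredictEndpoint: aws.String("https://realtime.machinelearning.us-east-1.amazonaws.com"), // hypothetical
        Record:          map[string]*string{"feature1": aws.String("1.0")},                      // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.Prediction)
}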
39  vendor/github.com/aws/aws-sdk-go/service/machinelearning/customizations_test.go  (generated, vendored, normal file)
@@ -0,0 +1,39 @@
package machinelearning_test

import (
    "bytes"
    "io/ioutil"
    "net/http"
    "testing"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/awstesting/unit"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

func TestPredictEndpoint(t *testing.T) {
    ml := machinelearning.New(unit.Session)
    ml.Handlers.Send.Clear()
    ml.Handlers.Send.PushBack(func(r *request.Request) {
        r.HTTPResponse = &http.Response{
            StatusCode: 200,
            Header:     http.Header{},
            Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
        }
    })

    req, _ := ml.PredictRequest(&machinelearning.PredictInput{
        PredictEndpoint: aws.String("https://localhost/endpoint"),
        MLModelId:       aws.String("id"),
        Record:          map[string]*string{},
    })
    err := req.Send()

    if err != nil {
        t.Errorf("expect no error, got %v", err)
    }
    if e, a := "https://localhost/endpoint", req.HTTPRequest.URL.String(); e != a {
        t.Errorf("expect %v, got %v", e, a)
    }
}
26  vendor/github.com/aws/aws-sdk-go/service/machinelearning/doc.go  (generated, vendored, normal file)
@@ -0,0 +1,26 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package machinelearning provides the client and types for making API
// requests to Amazon Machine Learning.
//
// Definition of the public APIs exposed by Amazon Machine Learning
//
// See machinelearning package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/machinelearning/
//
// Using the Client
//
// To contact Amazon Machine Learning with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon Machine Learning client MachineLearning for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/machinelearning/#New
package machinelearning
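As a companion to the "Using the Client" notes above, here is a minimal sketch of creating the client from a session and issuing a request (illustrative only, not part of the diff; the region and limit values are assumptions):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
    // Create the client from a session; the region is an assumed example value.
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := machinelearning.New(sess)

    // List up to 10 ML models; the client is safe for concurrent use.
    out, err := svc.DescribeMLModels(&machinelearning.DescribeMLModelsInput{
        Limit: aws.Int64(10),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, m := range out.Results {
        fmt.Println(aws.StringValue(m.MLModelId), aws.StringValue(m.Status))
    }
}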
54  vendor/github.com/aws/aws-sdk-go/service/machinelearning/errors.go  (generated, vendored, normal file)
@@ -0,0 +1,54 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package machinelearning

const (

    // ErrCodeIdempotentParameterMismatchException for service response error code
    // "IdempotentParameterMismatchException".
    //
    // A second request to use or change an object was not allowed. This can result
    // from retrying a request using a parameter that was not present in the original
    // request.
    ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException"

    // ErrCodeInternalServerException for service response error code
    // "InternalServerException".
    //
    // An error on the server occurred when trying to process a request.
    ErrCodeInternalServerException = "InternalServerException"

    // ErrCodeInvalidInputException for service response error code
    // "InvalidInputException".
    //
    // An error on the client occurred. Typically, the cause is an invalid input
    // value.
    ErrCodeInvalidInputException = "InvalidInputException"

    // ErrCodeInvalidTagException for service response error code
    // "InvalidTagException".
    ErrCodeInvalidTagException = "InvalidTagException"

    // ErrCodeLimitExceededException for service response error code
    // "LimitExceededException".
    //
    // The subscriber exceeded the maximum number of operations. This exception
    // can occur when listing objects such as DataSource.
    ErrCodeLimitExceededException = "LimitExceededException"

    // ErrCodePredictorNotMountedException for service response error code
    // "PredictorNotMountedException".
    //
    // The exception is thrown when a predict request is made to an unmounted MLModel.
    ErrCodePredictorNotMountedException = "PredictorNotMountedException"

    // ErrCodeResourceNotFoundException for service response error code
    // "ResourceNotFoundException".
    //
    // A specified resource cannot be located.
    ErrCodeResourceNotFoundException = "ResourceNotFoundException"

    // ErrCodeTagLimitExceededException for service response error code
    // "TagLimitExceededException".
    ErrCodeTagLimitExceededException = "TagLimitExceededException"
)
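These constants are meant to be compared against the code reported by an awserr.Error. A minimal sketch of that pattern (illustrative, not part of the diff; the model ID is a hypothetical placeholder):

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
    svc := machinelearning.New(session.Must(session.NewSession()))

    _, err := svc.GetMLModel(&machinelearning.GetMLModelInput{
        MLModelId: aws.String("ml-does-not-exist"), // hypothetical ID
    })
    if err != nil {
        // awserr.Error exposes the service error code, which can be matched
        // against the ErrCode* constants defined above.
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case machinelearning.ErrCodeResourceNotFoundException:
                log.Printf("model not found: %s", aerr.Message())
            case machinelearning.ErrCodeInvalidInputException:
                log.Printf("bad input: %s", aerr.Message())
            default:
                log.Printf("request failed: %v", aerr)
            }
            return
        }
        log.Fatal(err)
    }
}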
200  vendor/github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface/interface.go  (generated, vendored, normal file)
@@ -0,0 +1,200 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package machinelearningiface provides an interface to enable mocking the Amazon Machine Learning service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package machinelearningiface

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

// MachineLearningAPI provides an interface to enable mocking the
// machinelearning.MachineLearning service client's API operation,
// paginators, and waiters. This make unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
//    // myFunc uses an SDK service client to make a request to
//    // Amazon Machine Learning.
//    func myFunc(svc machinelearningiface.MachineLearningAPI) bool {
//        // Make svc.AddTags request
//    }
//
//    func main() {
//        sess := session.New()
//        svc := machinelearning.New(sess)
//
//        myFunc(svc)
//    }
//
// In your _test.go file:
//
//    // Define a mock struct to be used in your unit tests of myFunc.
//    type mockMachineLearningClient struct {
//        machinelearningiface.MachineLearningAPI
//    }
//    func (m *mockMachineLearningClient) AddTags(input *machinelearning.AddTagsInput) (*machinelearning.AddTagsOutput, error) {
//        // mock response/functionality
//    }
//
//    func TestMyFunc(t *testing.T) {
//        // Setup Test
//        mockSvc := &mockMachineLearningClient{}
//
//        myfunc(mockSvc)
//
//        // Verify myFunc's functionality
//    }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. Its suggested to use the pattern above for testing, or using
// tooling to generate mocks to satisfy the interfaces.
type MachineLearningAPI interface {
    AddTags(*machinelearning.AddTagsInput) (*machinelearning.AddTagsOutput, error)
    AddTagsWithContext(aws.Context, *machinelearning.AddTagsInput, ...request.Option) (*machinelearning.AddTagsOutput, error)
    AddTagsRequest(*machinelearning.AddTagsInput) (*request.Request, *machinelearning.AddTagsOutput)

    CreateBatchPrediction(*machinelearning.CreateBatchPredictionInput) (*machinelearning.CreateBatchPredictionOutput, error)
    CreateBatchPredictionWithContext(aws.Context, *machinelearning.CreateBatchPredictionInput, ...request.Option) (*machinelearning.CreateBatchPredictionOutput, error)
    CreateBatchPredictionRequest(*machinelearning.CreateBatchPredictionInput) (*request.Request, *machinelearning.CreateBatchPredictionOutput)

    CreateDataSourceFromRDS(*machinelearning.CreateDataSourceFromRDSInput) (*machinelearning.CreateDataSourceFromRDSOutput, error)
    CreateDataSourceFromRDSWithContext(aws.Context, *machinelearning.CreateDataSourceFromRDSInput, ...request.Option) (*machinelearning.CreateDataSourceFromRDSOutput, error)
    CreateDataSourceFromRDSRequest(*machinelearning.CreateDataSourceFromRDSInput) (*request.Request, *machinelearning.CreateDataSourceFromRDSOutput)

    CreateDataSourceFromRedshift(*machinelearning.CreateDataSourceFromRedshiftInput) (*machinelearning.CreateDataSourceFromRedshiftOutput, error)
    CreateDataSourceFromRedshiftWithContext(aws.Context, *machinelearning.CreateDataSourceFromRedshiftInput, ...request.Option) (*machinelearning.CreateDataSourceFromRedshiftOutput, error)
    CreateDataSourceFromRedshiftRequest(*machinelearning.CreateDataSourceFromRedshiftInput) (*request.Request, *machinelearning.CreateDataSourceFromRedshiftOutput)

    CreateDataSourceFromS3(*machinelearning.CreateDataSourceFromS3Input) (*machinelearning.CreateDataSourceFromS3Output, error)
    CreateDataSourceFromS3WithContext(aws.Context, *machinelearning.CreateDataSourceFromS3Input, ...request.Option) (*machinelearning.CreateDataSourceFromS3Output, error)
    CreateDataSourceFromS3Request(*machinelearning.CreateDataSourceFromS3Input) (*request.Request, *machinelearning.CreateDataSourceFromS3Output)

    CreateEvaluation(*machinelearning.CreateEvaluationInput) (*machinelearning.CreateEvaluationOutput, error)
    CreateEvaluationWithContext(aws.Context, *machinelearning.CreateEvaluationInput, ...request.Option) (*machinelearning.CreateEvaluationOutput, error)
    CreateEvaluationRequest(*machinelearning.CreateEvaluationInput) (*request.Request, *machinelearning.CreateEvaluationOutput)

    CreateMLModel(*machinelearning.CreateMLModelInput) (*machinelearning.CreateMLModelOutput, error)
    CreateMLModelWithContext(aws.Context, *machinelearning.CreateMLModelInput, ...request.Option) (*machinelearning.CreateMLModelOutput, error)
    CreateMLModelRequest(*machinelearning.CreateMLModelInput) (*request.Request, *machinelearning.CreateMLModelOutput)

    CreateRealtimeEndpoint(*machinelearning.CreateRealtimeEndpointInput) (*machinelearning.CreateRealtimeEndpointOutput, error)
    CreateRealtimeEndpointWithContext(aws.Context, *machinelearning.CreateRealtimeEndpointInput, ...request.Option) (*machinelearning.CreateRealtimeEndpointOutput, error)
    CreateRealtimeEndpointRequest(*machinelearning.CreateRealtimeEndpointInput) (*request.Request, *machinelearning.CreateRealtimeEndpointOutput)

    DeleteBatchPrediction(*machinelearning.DeleteBatchPredictionInput) (*machinelearning.DeleteBatchPredictionOutput, error)
    DeleteBatchPredictionWithContext(aws.Context, *machinelearning.DeleteBatchPredictionInput, ...request.Option) (*machinelearning.DeleteBatchPredictionOutput, error)
    DeleteBatchPredictionRequest(*machinelearning.DeleteBatchPredictionInput) (*request.Request, *machinelearning.DeleteBatchPredictionOutput)

    DeleteDataSource(*machinelearning.DeleteDataSourceInput) (*machinelearning.DeleteDataSourceOutput, error)
    DeleteDataSourceWithContext(aws.Context, *machinelearning.DeleteDataSourceInput, ...request.Option) (*machinelearning.DeleteDataSourceOutput, error)
    DeleteDataSourceRequest(*machinelearning.DeleteDataSourceInput) (*request.Request, *machinelearning.DeleteDataSourceOutput)

    DeleteEvaluation(*machinelearning.DeleteEvaluationInput) (*machinelearning.DeleteEvaluationOutput, error)
    DeleteEvaluationWithContext(aws.Context, *machinelearning.DeleteEvaluationInput, ...request.Option) (*machinelearning.DeleteEvaluationOutput, error)
    DeleteEvaluationRequest(*machinelearning.DeleteEvaluationInput) (*request.Request, *machinelearning.DeleteEvaluationOutput)

    DeleteMLModel(*machinelearning.DeleteMLModelInput) (*machinelearning.DeleteMLModelOutput, error)
    DeleteMLModelWithContext(aws.Context, *machinelearning.DeleteMLModelInput, ...request.Option) (*machinelearning.DeleteMLModelOutput, error)
    DeleteMLModelRequest(*machinelearning.DeleteMLModelInput) (*request.Request, *machinelearning.DeleteMLModelOutput)

    DeleteRealtimeEndpoint(*machinelearning.DeleteRealtimeEndpointInput) (*machinelearning.DeleteRealtimeEndpointOutput, error)
    DeleteRealtimeEndpointWithContext(aws.Context, *machinelearning.DeleteRealtimeEndpointInput, ...request.Option) (*machinelearning.DeleteRealtimeEndpointOutput, error)
    DeleteRealtimeEndpointRequest(*machinelearning.DeleteRealtimeEndpointInput) (*request.Request, *machinelearning.DeleteRealtimeEndpointOutput)

    DeleteTags(*machinelearning.DeleteTagsInput) (*machinelearning.DeleteTagsOutput, error)
    DeleteTagsWithContext(aws.Context, *machinelearning.DeleteTagsInput, ...request.Option) (*machinelearning.DeleteTagsOutput, error)
    DeleteTagsRequest(*machinelearning.DeleteTagsInput) (*request.Request, *machinelearning.DeleteTagsOutput)

    DescribeBatchPredictions(*machinelearning.DescribeBatchPredictionsInput) (*machinelearning.DescribeBatchPredictionsOutput, error)
    DescribeBatchPredictionsWithContext(aws.Context, *machinelearning.DescribeBatchPredictionsInput, ...request.Option) (*machinelearning.DescribeBatchPredictionsOutput, error)
    DescribeBatchPredictionsRequest(*machinelearning.DescribeBatchPredictionsInput) (*request.Request, *machinelearning.DescribeBatchPredictionsOutput)

    DescribeBatchPredictionsPages(*machinelearning.DescribeBatchPredictionsInput, func(*machinelearning.DescribeBatchPredictionsOutput, bool) bool) error
    DescribeBatchPredictionsPagesWithContext(aws.Context, *machinelearning.DescribeBatchPredictionsInput, func(*machinelearning.DescribeBatchPredictionsOutput, bool) bool, ...request.Option) error

    DescribeDataSources(*machinelearning.DescribeDataSourcesInput) (*machinelearning.DescribeDataSourcesOutput, error)
    DescribeDataSourcesWithContext(aws.Context, *machinelearning.DescribeDataSourcesInput, ...request.Option) (*machinelearning.DescribeDataSourcesOutput, error)
    DescribeDataSourcesRequest(*machinelearning.DescribeDataSourcesInput) (*request.Request, *machinelearning.DescribeDataSourcesOutput)

    DescribeDataSourcesPages(*machinelearning.DescribeDataSourcesInput, func(*machinelearning.DescribeDataSourcesOutput, bool) bool) error
    DescribeDataSourcesPagesWithContext(aws.Context, *machinelearning.DescribeDataSourcesInput, func(*machinelearning.DescribeDataSourcesOutput, bool) bool, ...request.Option) error

    DescribeEvaluations(*machinelearning.DescribeEvaluationsInput) (*machinelearning.DescribeEvaluationsOutput, error)
    DescribeEvaluationsWithContext(aws.Context, *machinelearning.DescribeEvaluationsInput, ...request.Option) (*machinelearning.DescribeEvaluationsOutput, error)
    DescribeEvaluationsRequest(*machinelearning.DescribeEvaluationsInput) (*request.Request, *machinelearning.DescribeEvaluationsOutput)

    DescribeEvaluationsPages(*machinelearning.DescribeEvaluationsInput, func(*machinelearning.DescribeEvaluationsOutput, bool) bool) error
    DescribeEvaluationsPagesWithContext(aws.Context, *machinelearning.DescribeEvaluationsInput, func(*machinelearning.DescribeEvaluationsOutput, bool) bool, ...request.Option) error

    DescribeMLModels(*machinelearning.DescribeMLModelsInput) (*machinelearning.DescribeMLModelsOutput, error)
    DescribeMLModelsWithContext(aws.Context, *machinelearning.DescribeMLModelsInput, ...request.Option) (*machinelearning.DescribeMLModelsOutput, error)
    DescribeMLModelsRequest(*machinelearning.DescribeMLModelsInput) (*request.Request, *machinelearning.DescribeMLModelsOutput)

    DescribeMLModelsPages(*machinelearning.DescribeMLModelsInput, func(*machinelearning.DescribeMLModelsOutput, bool) bool) error
    DescribeMLModelsPagesWithContext(aws.Context, *machinelearning.DescribeMLModelsInput, func(*machinelearning.DescribeMLModelsOutput, bool) bool, ...request.Option) error

    DescribeTags(*machinelearning.DescribeTagsInput) (*machinelearning.DescribeTagsOutput, error)
    DescribeTagsWithContext(aws.Context, *machinelearning.DescribeTagsInput, ...request.Option) (*machinelearning.DescribeTagsOutput, error)
    DescribeTagsRequest(*machinelearning.DescribeTagsInput) (*request.Request, *machinelearning.DescribeTagsOutput)

    GetBatchPrediction(*machinelearning.GetBatchPredictionInput) (*machinelearning.GetBatchPredictionOutput, error)
    GetBatchPredictionWithContext(aws.Context, *machinelearning.GetBatchPredictionInput, ...request.Option) (*machinelearning.GetBatchPredictionOutput, error)
    GetBatchPredictionRequest(*machinelearning.GetBatchPredictionInput) (*request.Request, *machinelearning.GetBatchPredictionOutput)

    GetDataSource(*machinelearning.GetDataSourceInput) (*machinelearning.GetDataSourceOutput, error)
    GetDataSourceWithContext(aws.Context, *machinelearning.GetDataSourceInput, ...request.Option) (*machinelearning.GetDataSourceOutput, error)
    GetDataSourceRequest(*machinelearning.GetDataSourceInput) (*request.Request, *machinelearning.GetDataSourceOutput)

    GetEvaluation(*machinelearning.GetEvaluationInput) (*machinelearning.GetEvaluationOutput, error)
    GetEvaluationWithContext(aws.Context, *machinelearning.GetEvaluationInput, ...request.Option) (*machinelearning.GetEvaluationOutput, error)
    GetEvaluationRequest(*machinelearning.GetEvaluationInput) (*request.Request, *machinelearning.GetEvaluationOutput)

    GetMLModel(*machinelearning.GetMLModelInput) (*machinelearning.GetMLModelOutput, error)
    GetMLModelWithContext(aws.Context, *machinelearning.GetMLModelInput, ...request.Option) (*machinelearning.GetMLModelOutput, error)
    GetMLModelRequest(*machinelearning.GetMLModelInput) (*request.Request, *machinelearning.GetMLModelOutput)

    Predict(*machinelearning.PredictInput) (*machinelearning.PredictOutput, error)
    PredictWithContext(aws.Context, *machinelearning.PredictInput, ...request.Option) (*machinelearning.PredictOutput, error)
    PredictRequest(*machinelearning.PredictInput) (*request.Request, *machinelearning.PredictOutput)

    UpdateBatchPrediction(*machinelearning.UpdateBatchPredictionInput) (*machinelearning.UpdateBatchPredictionOutput, error)
    UpdateBatchPredictionWithContext(aws.Context, *machinelearning.UpdateBatchPredictionInput, ...request.Option) (*machinelearning.UpdateBatchPredictionOutput, error)
    UpdateBatchPredictionRequest(*machinelearning.UpdateBatchPredictionInput) (*request.Request, *machinelearning.UpdateBatchPredictionOutput)

    UpdateDataSource(*machinelearning.UpdateDataSourceInput) (*machinelearning.UpdateDataSourceOutput, error)
    UpdateDataSourceWithContext(aws.Context, *machinelearning.UpdateDataSourceInput, ...request.Option) (*machinelearning.UpdateDataSourceOutput, error)
    UpdateDataSourceRequest(*machinelearning.UpdateDataSourceInput) (*request.Request, *machinelearning.UpdateDataSourceOutput)

    UpdateEvaluation(*machinelearning.UpdateEvaluationInput) (*machinelearning.UpdateEvaluationOutput, error)
    UpdateEvaluationWithContext(aws.Context, *machinelearning.UpdateEvaluationInput, ...request.Option) (*machinelearning.UpdateEvaluationOutput, error)
    UpdateEvaluationRequest(*machinelearning.UpdateEvaluationInput) (*request.Request, *machinelearning.UpdateEvaluationOutput)

    UpdateMLModel(*machinelearning.UpdateMLModelInput) (*machinelearning.UpdateMLModelOutput, error)
    UpdateMLModelWithContext(aws.Context, *machinelearning.UpdateMLModelInput, ...request.Option) (*machinelearning.UpdateMLModelOutput, error)
    UpdateMLModelRequest(*machinelearning.UpdateMLModelInput) (*request.Request, *machinelearning.UpdateMLModelOutput)

    WaitUntilBatchPredictionAvailable(*machinelearning.DescribeBatchPredictionsInput) error
    WaitUntilBatchPredictionAvailableWithContext(aws.Context, *machinelearning.DescribeBatchPredictionsInput, ...request.WaiterOption) error

    WaitUntilDataSourceAvailable(*machinelearning.DescribeDataSourcesInput) error
    WaitUntilDataSourceAvailableWithContext(aws.Context, *machinelearning.DescribeDataSourcesInput, ...request.WaiterOption) error

    WaitUntilEvaluationAvailable(*machinelearning.DescribeEvaluationsInput) error
    WaitUntilEvaluationAvailableWithContext(aws.Context, *machinelearning.DescribeEvaluationsInput, ...request.WaiterOption) error

    WaitUntilMLModelAvailable(*machinelearning.DescribeMLModelsInput) error
    WaitUntilMLModelAvailableWithContext(aws.Context, *machinelearning.DescribeMLModelsInput, ...request.WaiterOption) error
}

var _ MachineLearningAPI = (*machinelearning.MachineLearning)(nil)
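The package comment above describes the mocking pattern in prose; the snippet below is one possible concrete rendering of it (illustrative only; mockMLClient, modelReady, and the canned status are hypothetical test fixtures, not part of the diff):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/machinelearning"
    "github.com/aws/aws-sdk-go/service/machinelearning/machinelearningiface"
)

// mockMLClient embeds the interface so only the methods under test need to be
// overridden; calling any non-overridden method would panic on the nil embed.
type mockMLClient struct {
    machinelearningiface.MachineLearningAPI
}

func (m *mockMLClient) GetMLModel(in *machinelearning.GetMLModelInput) (*machinelearning.GetMLModelOutput, error) {
    // Canned response standing in for the service.
    return &machinelearning.GetMLModelOutput{
        MLModelId: in.MLModelId,
        Status:    aws.String("COMPLETED"),
    }, nil
}

// modelReady accepts the interface, so either the real client or the mock can
// be passed in.
func modelReady(svc machinelearningiface.MachineLearningAPI, id string) (bool, error) {
    out, err := svc.GetMLModel(&machinelearning.GetMLModelInput{MLModelId: aws.String(id)})
    if err != nil {
        return false, err
    }
    return aws.StringValue(out.Status) == "COMPLETED", nil
}

func main() {
    ok, _ := modelReady(&mockMLClient{}, "ml-fake-id") // hypothetical ID
    fmt.Println(ok)
}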
95  vendor/github.com/aws/aws-sdk-go/service/machinelearning/service.go  (generated, vendored, normal file)
@@ -0,0 +1,95 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package machinelearning

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/client/metadata"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/signer/v4"
    "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)

// MachineLearning provides the API operation methods for making requests to
// Amazon Machine Learning. See this package's package overview docs
// for details on the service.
//
// MachineLearning methods are safe to use concurrently. It is not safe to
// modify mutate any of the struct's properties though.
type MachineLearning struct {
    *client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
    ServiceName = "machinelearning" // Service endpoint prefix API calls made to.
    EndpointsID = ServiceName       // Service ID for Regions and Endpoints metadata.
)

// New creates a new instance of the MachineLearning client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create a MachineLearning client from just a session.
//     svc := machinelearning.New(mySession)
//
//     // Create a MachineLearning client with additional configuration
//     svc := machinelearning.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *MachineLearning {
    c := p.ClientConfig(EndpointsID, cfgs...)
    return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MachineLearning {
    svc := &MachineLearning{
        Client: client.New(
            cfg,
            metadata.ClientInfo{
                ServiceName:   ServiceName,
                SigningName:   signingName,
                SigningRegion: signingRegion,
                Endpoint:      endpoint,
                APIVersion:    "2014-12-12",
                JSONVersion:   "1.1",
                TargetPrefix:  "AmazonML_20141212",
            },
            handlers,
        ),
    }

    // Handlers
    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
    svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
    svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
    svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
    svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)

    // Run custom client initialization if present
    if initClient != nil {
        initClient(svc.Client)
    }

    return svc
}

// newRequest creates a new request for a MachineLearning operation and runs any
// custom request initialization.
func (c *MachineLearning) newRequest(op *request.Operation, params, data interface{}) *request.Request {
    req := c.NewRequest(op, params, data)

    // Run custom request initialization if present
    if initRequest != nil {
        initRequest(req)
    }

    return req
}
214  vendor/github.com/aws/aws-sdk-go/service/machinelearning/waiters.go  (generated, vendored, normal file)
@@ -0,0 +1,214 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package machinelearning

import (
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
)

// WaitUntilBatchPredictionAvailable uses the Amazon Machine Learning API operation
// DescribeBatchPredictions to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *MachineLearning) WaitUntilBatchPredictionAvailable(input *DescribeBatchPredictionsInput) error {
    return c.WaitUntilBatchPredictionAvailableWithContext(aws.BackgroundContext(), input)
}

// WaitUntilBatchPredictionAvailableWithContext is an extended version of WaitUntilBatchPredictionAvailable.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MachineLearning) WaitUntilBatchPredictionAvailableWithContext(ctx aws.Context, input *DescribeBatchPredictionsInput, opts ...request.WaiterOption) error {
    w := request.Waiter{
        Name:        "WaitUntilBatchPredictionAvailable",
        MaxAttempts: 60,
        Delay:       request.ConstantWaiterDelay(30 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {
                State:   request.SuccessWaiterState,
                Matcher: request.PathAllWaiterMatch, Argument: "Results[].Status",
                Expected: "COMPLETED",
            },
            {
                State:   request.FailureWaiterState,
                Matcher: request.PathAnyWaiterMatch, Argument: "Results[].Status",
                Expected: "FAILED",
            },
        },
        Logger: c.Config.Logger,
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            var inCpy *DescribeBatchPredictionsInput
            if input != nil {
                tmp := *input
                inCpy = &tmp
            }
            req, _ := c.DescribeBatchPredictionsRequest(inCpy)
            req.SetContext(ctx)
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    w.ApplyOptions(opts...)

    return w.WaitWithContext(ctx)
}

// WaitUntilDataSourceAvailable uses the Amazon Machine Learning API operation
// DescribeDataSources to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *MachineLearning) WaitUntilDataSourceAvailable(input *DescribeDataSourcesInput) error {
    return c.WaitUntilDataSourceAvailableWithContext(aws.BackgroundContext(), input)
}

// WaitUntilDataSourceAvailableWithContext is an extended version of WaitUntilDataSourceAvailable.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MachineLearning) WaitUntilDataSourceAvailableWithContext(ctx aws.Context, input *DescribeDataSourcesInput, opts ...request.WaiterOption) error {
    w := request.Waiter{
        Name:        "WaitUntilDataSourceAvailable",
        MaxAttempts: 60,
        Delay:       request.ConstantWaiterDelay(30 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {
                State:   request.SuccessWaiterState,
                Matcher: request.PathAllWaiterMatch, Argument: "Results[].Status",
                Expected: "COMPLETED",
            },
            {
                State:   request.FailureWaiterState,
                Matcher: request.PathAnyWaiterMatch, Argument: "Results[].Status",
                Expected: "FAILED",
            },
        },
        Logger: c.Config.Logger,
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            var inCpy *DescribeDataSourcesInput
            if input != nil {
                tmp := *input
                inCpy = &tmp
            }
            req, _ := c.DescribeDataSourcesRequest(inCpy)
            req.SetContext(ctx)
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    w.ApplyOptions(opts...)

    return w.WaitWithContext(ctx)
}

// WaitUntilEvaluationAvailable uses the Amazon Machine Learning API operation
// DescribeEvaluations to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *MachineLearning) WaitUntilEvaluationAvailable(input *DescribeEvaluationsInput) error {
    return c.WaitUntilEvaluationAvailableWithContext(aws.BackgroundContext(), input)
}

// WaitUntilEvaluationAvailableWithContext is an extended version of WaitUntilEvaluationAvailable.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MachineLearning) WaitUntilEvaluationAvailableWithContext(ctx aws.Context, input *DescribeEvaluationsInput, opts ...request.WaiterOption) error {
    w := request.Waiter{
        Name:        "WaitUntilEvaluationAvailable",
        MaxAttempts: 60,
        Delay:       request.ConstantWaiterDelay(30 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {
                State:   request.SuccessWaiterState,
                Matcher: request.PathAllWaiterMatch, Argument: "Results[].Status",
                Expected: "COMPLETED",
            },
            {
                State:   request.FailureWaiterState,
                Matcher: request.PathAnyWaiterMatch, Argument: "Results[].Status",
                Expected: "FAILED",
            },
        },
        Logger: c.Config.Logger,
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            var inCpy *DescribeEvaluationsInput
            if input != nil {
                tmp := *input
                inCpy = &tmp
            }
            req, _ := c.DescribeEvaluationsRequest(inCpy)
            req.SetContext(ctx)
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    w.ApplyOptions(opts...)

    return w.WaitWithContext(ctx)
}

// WaitUntilMLModelAvailable uses the Amazon Machine Learning API operation
// DescribeMLModels to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *MachineLearning) WaitUntilMLModelAvailable(input *DescribeMLModelsInput) error {
    return c.WaitUntilMLModelAvailableWithContext(aws.BackgroundContext(), input)
}

// WaitUntilMLModelAvailableWithContext is an extended version of WaitUntilMLModelAvailable.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MachineLearning) WaitUntilMLModelAvailableWithContext(ctx aws.Context, input *DescribeMLModelsInput, opts ...request.WaiterOption) error {
    w := request.Waiter{
        Name:        "WaitUntilMLModelAvailable",
        MaxAttempts: 60,
        Delay:       request.ConstantWaiterDelay(30 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {
                State:   request.SuccessWaiterState,
                Matcher: request.PathAllWaiterMatch, Argument: "Results[].Status",
                Expected: "COMPLETED",
            },
            {
                State:   request.FailureWaiterState,
                Matcher: request.PathAnyWaiterMatch, Argument: "Results[].Status",
                Expected: "FAILED",
            },
        },
        Logger: c.Config.Logger,
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            var inCpy *DescribeMLModelsInput
            if input != nil {
                tmp := *input
                inCpy = &tmp
            }
            req, _ := c.DescribeMLModelsRequest(inCpy)
            req.SetContext(ctx)
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    w.ApplyOptions(opts...)

    return w.WaitWithContext(ctx)
}
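A minimal sketch of driving one of these waiters with a context and custom waiter options (illustrative, not part of the diff; the timeout, filter variable, model ID, and attempt count are assumptions):

package main

import (
    "context"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/machinelearning"
)

func main() {
    svc := machinelearning.New(session.Must(session.NewSession()))

    // Bound the overall wait with a context timeout (assumed 30 minutes).
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
    defer cancel()

    // Poll DescribeMLModels until every matching model reports COMPLETED,
    // or fail fast if any reports FAILED, per the acceptors defined above.
    err := svc.WaitUntilMLModelAvailableWithContext(ctx,
        &machinelearning.DescribeMLModelsInput{
            FilterVariable: aws.String("MLModelId"),          // hypothetical filter
            EQ:             aws.String("ml-example-model-id"), // hypothetical ID
        },
        request.WithWaiterMaxAttempts(20), // override the default 60 attempts
    )
    if err != nil {
        log.Fatalf("model never became available: %v", err)
    }
}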