// Code generated by smithy-go-codegen DO NOT EDIT.

package types

import (
	smithydocument "github.com/aws/smithy-go/document"
	"time"
)

// Represents the output of a GetBatchPrediction operation. The content consists
// of the detailed metadata, the status, and the data file information of a
// Batch Prediction.
type BatchPrediction struct {

	// The ID of the DataSource that points to the group of observations to predict.
	BatchPredictionDataSourceId *string

	// The ID assigned to the BatchPrediction at creation. This value should be
	// identical to the value of the BatchPredictionID in the request.
	BatchPredictionId *string

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64

	// The time that the BatchPrediction was created. The time is expressed in epoch
	// time.
	CreatedAt *time.Time

	// The AWS user account that invoked the BatchPrediction. The account type can be
	// either an AWS root account or an AWS Identity and Access Management (IAM) user
	// account.
	CreatedByIamUser *string

	// A timestamp represented in epoch time.
	FinishedAt *time.Time

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string

	// Long integer type that is a 64-bit signed number.
	InvalidRecordCount *int64

	// The time of the most recent edit to the BatchPrediction. The time is expressed
	// in epoch time.
	LastUpdatedAt *time.Time

	// The ID of the MLModel that generated predictions for the BatchPrediction
	// request.
	MLModelId *string

	// A description of the most recent details about processing the batch prediction
	// request.
	Message *string

	// A user-supplied name or description of the BatchPrediction.
	Name *string

	// The location of an Amazon S3 bucket or directory to receive the operation
	// results. The following substrings are not allowed in the s3 key portion of the
	// outputURI field: ':', '//', '/./', '/../'.
	OutputUri *string

	// A timestamp represented in epoch time.
	StartedAt *time.Time

	// The status of the BatchPrediction. This element can have one of the following
	// values:
	//
	// * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	// generate predictions for a batch of observations.
	//
	// * INPROGRESS - The process is underway.
	//
	// * FAILED - The request to perform a batch prediction did not run to
	// completion. It is not usable.
	//
	// * COMPLETED - The batch prediction process completed successfully.
	//
	// * DELETED - The BatchPrediction is marked as deleted. It is not usable.
	Status EntityStatus

	// Long integer type that is a 64-bit signed number.
	TotalRecordCount *int64

	noSmithyDocumentSerde
}
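// A minimal sketch (illustrative only, not part of the generated API) of
// checking an output key against the substrings that the OutputUri
// documentation above forbids in the s3 key portion; it assumes a "strings"
// import in the calling code:
//
//	func validOutputKey(key string) bool {
//		for _, bad := range []string{":", "//", "/./", "/../"} {
//			if strings.Contains(key, bad) {
//				return false
//			}
//		}
//		return true
//	}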
// Represents the output of the GetDataSource operation. The content consists
// of the detailed metadata and data file information, as well as the current
// status of the DataSource.
type DataSource struct {

	// The parameter is true if statistics need to be generated from the observation
	// data.
	ComputeStatistics bool

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64

	// The time that the DataSource was created. The time is expressed in epoch time.
	CreatedAt *time.Time

	// The AWS user account from which the DataSource was created. The account type
	// can be either an AWS root account or an AWS Identity and Access Management
	// (IAM) user account.
	CreatedByIamUser *string

	// The location and name of the data in Amazon Simple Storage Service (Amazon S3)
	// that is used by a DataSource.
	DataLocationS3 *string

	// A JSON string that represents the splitting and rearrangement requirement used
	// when this DataSource was created.
	DataRearrangement *string

	// The total number of observations contained in the data files that the
	// DataSource references.
	DataSizeInBytes *int64

	// The ID that is assigned to the DataSource during creation.
	DataSourceId *string

	// A timestamp represented in epoch time.
	FinishedAt *time.Time

	// The time of the most recent edit to the DataSource. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time

	// A description of the most recent details about creating the DataSource.
	Message *string

	// A user-supplied name or description of the DataSource.
	Name *string

	// The number of data files referenced by the DataSource.
	NumberOfFiles *int64

	// The datasource details that are specific to Amazon RDS.
	RDSMetadata *RDSMetadata

	// Describes the DataSource details specific to Amazon Redshift.
	RedshiftMetadata *RedshiftMetadata

	// The Amazon Resource Name (ARN) of an AWS IAM Role
	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts),
	// such as the following: arn:aws:iam::account:role/rolename.
	RoleARN *string

	// A timestamp represented in epoch time.
	StartedAt *time.Time

	// The current status of the DataSource. This element can have one of the
	// following values:
	//
	// * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create
	// a DataSource.
	//
	// * INPROGRESS - The creation process is underway.
	//
	// * FAILED - The request to create a DataSource did not run to completion. It
	// is not usable.
	//
	// * COMPLETED - The creation process completed successfully.
	//
	// * DELETED - The DataSource is marked as deleted. It is not usable.
	Status EntityStatus

	noSmithyDocumentSerde
}
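// A minimal sketch (assumptions: ds is a *DataSource returned by the service,
// and the generated EntityStatus constants follow the usual smithy naming) of
// gating on the lifecycle states documented above:
//
//	switch ds.Status {
//	case EntityStatusCompleted:
//		// Safe to use the DataSource.
//	case EntityStatusFailed, EntityStatusDeleted:
//		// Not usable.
//	default:
//		// PENDING or INPROGRESS: poll again later.
//	}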
// Represents the output of a GetEvaluation operation. The content consists of
// the detailed metadata and data file information, as well as the current
// status of the Evaluation.
type Evaluation struct {

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64

	// The time that the Evaluation was created. The time is expressed in epoch time.
	CreatedAt *time.Time

	// The AWS user account that invoked the evaluation. The account type can be
	// either an AWS root account or an AWS Identity and Access Management (IAM) user
	// account.
	CreatedByIamUser *string

	// The ID of the DataSource that is used to evaluate the MLModel.
	EvaluationDataSourceId *string

	// The ID that is assigned to the Evaluation at creation.
	EvaluationId *string

	// A timestamp represented in epoch time.
	FinishedAt *time.Time

	// The location and name of the data in Amazon Simple Storage Service (Amazon S3)
	// that is used in the evaluation.
	InputDataLocationS3 *string

	// The time of the most recent edit to the Evaluation. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time

	// The ID of the MLModel that is the focus of the evaluation.
	MLModelId *string

	// A description of the most recent details about evaluating the MLModel.
	Message *string

	// A user-supplied name or description of the Evaluation.
	Name *string

	// Measurements of how well the MLModel performed, using observations referenced
	// by the DataSource. One of the following metrics is returned, based on the type
	// of the MLModel:
	//
	// * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique
	// to measure performance.
	//
	// * RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE)
	// technique to measure performance. RMSE measures the difference between
	// predicted and actual values for a single variable.
	//
	// * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to
	// measure performance.
	//
	// For more information about performance metrics, please see the Amazon Machine
	// Learning Developer Guide
	// (https://docs.aws.amazon.com/machine-learning/latest/dg).
	PerformanceMetrics *PerformanceMetrics

	// A timestamp represented in epoch time.
	StartedAt *time.Time

	// The status of the evaluation. This element can have one of the following
	// values:
	//
	// * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
	// evaluate an MLModel.
	//
	// * INPROGRESS - The evaluation is underway.
	//
	// * FAILED - The request to evaluate an MLModel did not run to completion. It
	// is not usable.
	//
	// * COMPLETED - The evaluation process completed successfully.
	//
	// * DELETED - The Evaluation is marked as deleted. It is not usable.
	Status EntityStatus

	noSmithyDocumentSerde
}
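// A minimal sketch (assumptions: eval is an *Evaluation returned by the
// service, and the metric key matches the BinaryAUC name documented above) of
// reading a binary model's AUC from the evaluation's metrics map:
//
//	if eval.PerformanceMetrics != nil {
//		if auc, ok := eval.PerformanceMetrics.Properties["BinaryAUC"]; ok {
//			// auc is a string such as "0.93"; parse it with strconv.ParseFloat.
//			_ = auc
//		}
//	}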
// Represents the output of a GetMLModel operation. The content consists of the
// detailed metadata and the current status of the MLModel.
type MLModel struct {

	// The algorithm used to train the MLModel. The following algorithm is
	// supported:
	//
	// * SGD -- Stochastic gradient descent. The goal of SGD is to minimize the
	// gradient of the loss function.
	Algorithm Algorithm

	// Long integer type that is a 64-bit signed number.
	ComputeTime *int64

	// The time that the MLModel was created. The time is expressed in epoch time.
	CreatedAt *time.Time

	// The AWS user account from which the MLModel was created. The account type can
	// be either an AWS root account or an AWS Identity and Access Management (IAM)
	// user account.
	CreatedByIamUser *string

	// The current endpoint of the MLModel.
	EndpointInfo *RealtimeEndpointInfo

	// A timestamp represented in epoch time.
	FinishedAt *time.Time

	// The location of the data file or directory in Amazon Simple Storage Service
	// (Amazon S3).
	InputDataLocationS3 *string

	// The time of the most recent edit to the MLModel. The time is expressed in
	// epoch time.
	LastUpdatedAt *time.Time

	// The ID assigned to the MLModel at creation.
	MLModelId *string

	// Identifies the MLModel category. The following are the available types:
	//
	// * REGRESSION - Produces a numeric result. For example, "What price should a
	// house be listed at?"
	//
	// * BINARY - Produces one of two possible results. For example, "Is this a
	// child-friendly web site?"
	//
	// * MULTICLASS - Produces one of several possible results. For example, "Is
	// this a HIGH-, LOW-, or MEDIUM-risk trade?"
	MLModelType MLModelType

	// A description of the most recent details about accessing the MLModel.
	Message *string

	// A user-supplied name or description of the MLModel.
	Name *string

	ScoreThreshold *float32

	// The time of the most recent edit to the ScoreThreshold. The time is expressed
	// in epoch time.
	ScoreThresholdLastUpdatedAt *time.Time

	// Long integer type that is a 64-bit signed number.
	SizeInBytes *int64

	// A timestamp represented in epoch time.
	StartedAt *time.Time

	// The current status of an MLModel. This element can have one of the following
	// values:
	//
	// * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create
	// an MLModel.
	//
	// * INPROGRESS - The creation process is underway.
	//
	// * FAILED - The request to create an MLModel didn't run to completion. The
	// model isn't usable.
	//
	// * COMPLETED - The creation process completed successfully.
	//
	// * DELETED - The MLModel is marked as deleted. It isn't usable.
	Status EntityStatus

	// The ID of the training DataSource. The CreateMLModel operation uses the
	// TrainingDataSourceId.
	TrainingDataSourceId *string

	// A list of the training parameters in the MLModel. The list is implemented as
	// a map of key-value pairs. The following is the current set of training
	// parameters:
	//
	// * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model.
	// Depending on the input data, the size of the model might affect its
	// performance. The value is an integer that ranges from 100000 to 2147483648.
	// The default value is 33554432.
	//
	// * sgd.maxPasses - The number of times that the training process traverses the
	// observations to build the MLModel. The value is an integer that ranges from 1
	// to 10000. The default value is 10.
	//
	// * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling
	// the data improves a model's ability to find the optimal solution for a
	// variety of data types. The valid values are auto and none. The default value
	// is none.
	//
	// * sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which
	// controls overfitting the data by penalizing large coefficients. This
	// parameter tends to drive coefficients to zero, resulting in a sparse feature
	// set. If you use this parameter, start by specifying a small value, such as
	// 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE. The default
	// is to not use L1 normalization. This parameter can't be used when L2 is
	// specified. Use this parameter sparingly.
	//
	// * sgd.l2RegularizationAmount - The coefficient regularization L2 norm, which
	// controls overfitting the data by penalizing large coefficients. This tends to
	// drive coefficients to small, nonzero values. If you use this parameter, start
	// by specifying a small value, such as 1.0E-08. The value is a double that
	// ranges from 0 to MAX_DOUBLE. The default is to not use L2 normalization. This
	// parameter can't be used when L1 is specified. Use this parameter sparingly.
	TrainingParameters map[string]string

	noSmithyDocumentSerde
}
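// A minimal sketch (illustrative only) of populating TrainingParameters with
// the sgd.* keys documented above; note that every value, including numbers,
// is passed as a string:
//
//	params := map[string]string{
//		"sgd.maxPasses":              "30",
//		"sgd.shuffleType":            "auto",
//		"sgd.l2RegularizationAmount": "1.0E-06",
//	}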
// Measurements of how well the MLModel performed on known observations. One of
// the following metrics is returned, based on the type of the MLModel:
//
// * BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC)
// technique to measure performance.
//
// * RegressionRMSE: The regression MLModel uses the Root Mean Square Error
// (RMSE) technique to measure performance. RMSE measures the difference
// between predicted and actual values for a single variable.
//
// * MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to
// measure performance.
//
// For more information about performance metrics, please see the Amazon
// Machine Learning Developer Guide
// (https://docs.aws.amazon.com/machine-learning/latest/dg).
type PerformanceMetrics struct {
	Properties map[string]string

	noSmithyDocumentSerde
}

// The output from a Predict operation:
//
// * Details - Contains the following attributes:
// DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS,
// and DetailsAttributes.ALGORITHM - SGD.
//
// * PredictedLabel - Present for either a BINARY or MULTICLASS MLModel
// request.
//
// * PredictedScores - Contains the raw classification score corresponding to
// each label.
//
// * PredictedValue - Present for a REGRESSION MLModel request.
type Prediction struct {

	// Provides any additional details regarding the prediction.
	Details map[string]string

	// The prediction label for either a BINARY or MULTICLASS MLModel.
	PredictedLabel *string

	// Provides the raw classification score corresponding to each label.
	PredictedScores map[string]float32

	// The prediction value for a REGRESSION MLModel.
	PredictedValue *float32

	noSmithyDocumentSerde
}
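// A minimal sketch (assumptions: p is a *Prediction from a Predict call, the
// caller knows the model type, and the generated MLModelType constants follow
// the usual smithy naming) of reading the field that applies to each model
// category, per the documentation above:
//
//	switch modelType {
//	case MLModelTypeRegression:
//		value := p.PredictedValue // numeric result
//		_ = value
//	case MLModelTypeBinary, MLModelTypeMulticlass:
//		label := p.PredictedLabel   // chosen label
//		scores := p.PredictedScores // raw score per label
//		_, _ = label, scores
//	}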
// The database details of an Amazon RDS database.
type RDSDatabase struct {

	// The name of a database hosted on an RDS DB instance.
	//
	// This member is required.
	DatabaseName *string

	// The ID of an RDS DB instance.
	//
	// This member is required.
	InstanceIdentifier *string

	noSmithyDocumentSerde
}

// The database credentials to connect to a database on an RDS DB instance.
type RDSDatabaseCredentials struct {

	// The password to be used by Amazon ML to connect to a database on an RDS DB
	// instance. The password should have sufficient permissions to execute the
	// RDSSelectQuery query.
	//
	// This member is required.
	Password *string

	// The username to be used by Amazon ML to connect to a database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute an
	// RDSSelectSqlQuery query.
	//
	// This member is required.
	Username *string

	noSmithyDocumentSerde
}

// The data specification of an Amazon Relational Database Service (Amazon RDS)
// DataSource.
type RDSDataSpec struct {

	// The AWS Identity and Access Management (IAM) credentials that are used to
	// connect to the Amazon RDS database.
	//
	// This member is required.
	DatabaseCredentials *RDSDatabaseCredentials

	// Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
	//
	// This member is required.
	DatabaseInformation *RDSDatabase

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic
	// Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon
	// RDS to Amazon S3. For more information, see Role templates
	// (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	//
	// This member is required.
	ResourceRole *string

	// The Amazon S3 location for staging Amazon RDS data. The data retrieved from
	// Amazon RDS using SelectSqlQuery is stored in this location.
	//
	// This member is required.
	S3StagingLocation *string

	// The security group IDs to be used to access a VPC-based RDS DB instance.
	// Ensure that there are appropriate ingress rules set up to allow access to the
	// RDS DB instance. This attribute is used by Data Pipeline to carry out the
	// copy task from Amazon RDS to Amazon S3.
	//
	// This member is required.
	SecurityGroupIds []string

	// The query that is used to retrieve the observation data for the DataSource.
	//
	// This member is required.
	SelectSqlQuery *string

	// The role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service
	// to monitor the progress of the copy task from Amazon RDS to Amazon S3. For
	// more information, see Role templates
	// (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	//
	// This member is required.
	ServiceRole *string

	// The subnet ID to be used to access a VPC-based RDS DB instance. This
	// attribute is used by Data Pipeline to carry out the copy task from Amazon RDS
	// to Amazon S3.
	//
	// This member is required.
	SubnetId *string
	// A JSON string that represents the splitting and rearrangement processing to
	// be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource. There are
	// multiple parameters that control what data is used to create a datasource:
	//
	// * percentBegin - Use percentBegin to indicate the beginning of the range of
	// the data used to create the Datasource. If you do not include percentBegin
	// and percentEnd, Amazon ML includes all of the data when creating the
	// datasource.
	//
	// * percentEnd - Use percentEnd to indicate the end of the range of the data
	// used to create the Datasource. If you do not include percentBegin and
	// percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	// * complement - The complement parameter instructs Amazon ML to use the data
	// that is not included in the range of percentBegin to percentEnd to create a
	// datasource. The complement parameter is useful if you need to create
	// complementary datasources for training and evaluation. To create a
	// complementary datasource, use the same values for percentBegin and
	// percentEnd, along with the complement parameter. For example, the following
	// two datasources do not share any data, and can be used to train and evaluate
	// a model. The first datasource has 25 percent of the data, and the second one
	// has 75 percent of the data.
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	// Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	// "complement":"true"}}
	//
	// * strategy - To change how Amazon ML splits the data for a datasource, use
	// the strategy parameter. The default value for the strategy parameter is
	// sequential, meaning that Amazon ML takes all of the data records between the
	// percentBegin and percentEnd parameters for the datasource, in the order that
	// the records appear in the input data. The following two DataRearrangement
	// lines are examples of sequentially ordered training and evaluation
	// datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"sequential"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"sequential", "complement":"true"}}
	//
	// To randomly split the input data into the proportions indicated by the
	// percentBegin and percentEnd parameters, set the strategy parameter to random
	// and provide a string that is used as the seed value for the random data
	// splitting (for example, you can use the S3 path to your data as the random
	// seed string). If you choose the random split strategy, Amazon ML assigns
	// each row of data a pseudo-random number between 0 and 100, and then selects
	// the rows that have an assigned number between percentBegin and percentEnd.
	// Pseudo-random numbers are assigned using both the input seed string value
	// and the byte offset as a seed, so changing the data results in a different
	// split. Any existing ordering is preserved. The random splitting strategy
	// ensures that variables in the training and evaluation data are distributed
	// similarly. It is useful in the cases where the input data may have an
	// implicit sort order, which would otherwise result in training and evaluation
	// datasources containing non-similar data records. The following two
	// DataRearrangement lines are examples of non-sequentially ordered training
	// and evaluation datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"random",
	// "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv",
	// "complement":"true"}}
	DataRearrangement *string

	// A JSON string that represents the schema for an Amazon RDS DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource. A DataSchema is not required if you specify a
	// DataSchemaUri. Define your DataSchema as a series of key-value pairs.
	// attributes and excludedVariableNames have an array of key-value pairs for
	// their value. Use the following format to define your DataSchema:
	//
	// { "version": "1.0", "recordAnnotationFieldName": "F1",
	// "recordWeightFieldName": "F2", "targetFieldName": "F3", "dataFormat": "CSV",
	// "dataFileContainsHeader": true, "attributes": [ { "fieldName": "F1",
	// "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, {
	// "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4",
	// "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" },
	// { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7",
	// "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType":
	// "WEIGHTED_STRING_SEQUENCE" } ], "excludedVariableNames": [ "F6" ] }
	DataSchema *string

	// The Amazon S3 location of the DataSchema.
	DataSchemaUri *string

	noSmithyDocumentSerde
}
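// A minimal sketch (illustrative only; the seed string is a hypothetical S3
// path) of building the complementary train/evaluate DataRearrangement strings
// described above for a 70/30 random split:
//
//	seed := "s3://examplebucket/input/data.csv"
//	train := `{"splitting":{"percentBegin":0,"percentEnd":70,"strategy":"random","randomSeed":"` + seed + `"}}`
//	eval := `{"splitting":{"percentBegin":0,"percentEnd":70,"strategy":"random","randomSeed":"` + seed + `","complement":"true"}}`
//	spec := &RDSDataSpec{DataRearrangement: &train}
//	_, _ = eval, spec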
// The datasource details that are specific to Amazon RDS.
type RDSMetadata struct {

	// The ID of the Data Pipeline instance that is used to copy data from Amazon
	// RDS to Amazon S3. You can use the ID to find details about the instance in
	// the Data Pipeline console.
	DataPipelineId *string

	// The database details required to connect to an Amazon RDS database.
	Database *RDSDatabase

	// The username to be used by Amazon ML to connect to a database on an Amazon
	// RDS instance. The username should have sufficient permissions to execute an
	// RDSSelectSqlQuery query.
	DatabaseUserName *string

	// The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance
	// to carry out the copy task from Amazon RDS to Amazon S3. For more
	// information, see Role templates
	// (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	ResourceRole *string

	// The SQL query that is supplied during CreateDataSourceFromRDS. Returns only
	// if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string

	// The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to
	// monitor the progress of the copy task from Amazon RDS to Amazon S3. For more
	// information, see Role templates
	// (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html)
	// for data pipelines.
	ServiceRole *string

	noSmithyDocumentSerde
}

// Describes the real-time endpoint information for an MLModel.
type RealtimeEndpointInfo struct {

	// The time that the request to create the real-time endpoint for the MLModel
	// was received. The time is expressed in epoch time.
	CreatedAt *time.Time

	// The current status of the real-time endpoint for the MLModel. This element
	// can have one of the following values:
	//
	// * NONE - Endpoint does not exist or was previously deleted.
	//
	// * READY - Endpoint is ready to be used for real-time predictions.
	//
	// * UPDATING - Updating/creating the endpoint.
	EndpointStatus RealtimeEndpointStatus

	// The URI that specifies where to send real-time prediction requests for the
	// MLModel. Note: The application must wait until the real-time endpoint is
	// ready before using this URI.
	EndpointUrl *string

	// The maximum processing rate for the real-time endpoint for MLModel, measured
	// in incoming requests per second.
	PeakRequestsPerSecond int32

	noSmithyDocumentSerde
}
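// A minimal sketch (assumptions: info is a *RealtimeEndpointInfo, and the
// generated RealtimeEndpointStatus constants follow the usual smithy naming)
// of honoring the "wait until ready" note above before sending requests:
//
//	if info.EndpointStatus == RealtimeEndpointStatusReady && info.EndpointUrl != nil {
//		url := *info.EndpointUrl // safe to send real-time prediction requests here
//		_ = url
//	}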
// Describes the database details required to connect to an Amazon Redshift
// database.
type RedshiftDatabase struct {

	// The ID of an Amazon Redshift cluster.
	//
	// This member is required.
	ClusterIdentifier *string

	// The name of a database hosted on an Amazon Redshift cluster.
	//
	// This member is required.
	DatabaseName *string

	noSmithyDocumentSerde
}

// Describes the database credentials for connecting to a database on an Amazon
// Redshift cluster.
type RedshiftDatabaseCredentials struct {

	// A password to be used by Amazon ML to connect to a database on an Amazon
	// Redshift cluster. The password should have sufficient permissions to execute
	// a RedshiftSelectSqlQuery query. The password should be valid for an Amazon
	// Redshift USER
	// (https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	//
	// This member is required.
	Password *string

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to a
	// database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER
	// (https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	//
	// This member is required.
	Username *string

	noSmithyDocumentSerde
}
// Describes the data specification of an Amazon Redshift DataSource.
type RedshiftDataSpec struct {

	// Describes AWS Identity and Access Management (IAM) credentials that are used
	// to connect to the Amazon Redshift database.
	//
	// This member is required.
	DatabaseCredentials *RedshiftDatabaseCredentials

	// Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift
	// DataSource.
	//
	// This member is required.
	DatabaseInformation *RedshiftDatabase

	// Describes an Amazon S3 location to store the result set of the SelectSqlQuery
	// query.
	//
	// This member is required.
	S3StagingLocation *string

	// Describes the SQL Query to execute on an Amazon Redshift database for an
	// Amazon Redshift DataSource.
	//
	// This member is required.
	SelectSqlQuery *string

	// A JSON string that represents the splitting and rearrangement processing to
	// be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource. There are
	// multiple parameters that control what data is used to create a datasource:
	//
	// * percentBegin - Use percentBegin to indicate the beginning of the range of
	// the data used to create the Datasource. If you do not include percentBegin
	// and percentEnd, Amazon ML includes all of the data when creating the
	// datasource.
	//
	// * percentEnd - Use percentEnd to indicate the end of the range of the data
	// used to create the Datasource. If you do not include percentBegin and
	// percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	// * complement - The complement parameter instructs Amazon ML to use the data
	// that is not included in the range of percentBegin to percentEnd to create a
	// datasource. The complement parameter is useful if you need to create
	// complementary datasources for training and evaluation. To create a
	// complementary datasource, use the same values for percentBegin and
	// percentEnd, along with the complement parameter. For example, the following
	// two datasources do not share any data, and can be used to train and evaluate
	// a model. The first datasource has 25 percent of the data, and the second one
	// has 75 percent of the data.
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	// Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	// "complement":"true"}}
	//
	// * strategy - To change how Amazon ML splits the data for a datasource, use
	// the strategy parameter. The default value for the strategy parameter is
	// sequential, meaning that Amazon ML takes all of the data records between the
	// percentBegin and percentEnd parameters for the datasource, in the order that
	// the records appear in the input data. The following two DataRearrangement
	// lines are examples of sequentially ordered training and evaluation
	// datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"sequential"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"sequential", "complement":"true"}}
	//
	// To randomly split the input data into the proportions indicated by the
	// percentBegin and percentEnd parameters, set the strategy parameter to random
	// and provide a string that is used as the seed value for the random data
	// splitting (for example, you can use the S3 path to your data as the random
	// seed string). If you choose the random split strategy, Amazon ML assigns
	// each row of data a pseudo-random number between 0 and 100, and then selects
	// the rows that have an assigned number between percentBegin and percentEnd.
	// Pseudo-random numbers are assigned using both the input seed string value
	// and the byte offset as a seed, so changing the data results in a different
	// split. Any existing ordering is preserved. The random splitting strategy
	// ensures that variables in the training and evaluation data are distributed
	// similarly. It is useful in the cases where the input data may have an
	// implicit sort order, which would otherwise result in training and evaluation
	// datasources containing non-similar data records. The following two
	// DataRearrangement lines are examples of non-sequentially ordered training
	// and evaluation datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"random",
	// "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv",
	// "complement":"true"}}
	DataRearrangement *string

	// A JSON string that represents the schema for an Amazon Redshift DataSource.
	// The DataSchema defines the structure of the observation data in the data
	// file(s) referenced in the DataSource. A DataSchema is not required if you
	// specify a DataSchemaUri. Define your DataSchema as a series of key-value
	// pairs. attributes and excludedVariableNames have an array of key-value pairs
	// for their value. Use the following format to define your DataSchema:
	//
	// { "version": "1.0", "recordAnnotationFieldName": "F1",
	// "recordWeightFieldName": "F2", "targetFieldName": "F3", "dataFormat": "CSV",
	// "dataFileContainsHeader": true, "attributes": [ { "fieldName": "F1",
	// "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, {
	// "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4",
	// "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" },
	// { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7",
	// "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType":
	// "WEIGHTED_STRING_SEQUENCE" } ], "excludedVariableNames": [ "F6" ] }
	DataSchema *string

	// Describes the schema location for an Amazon Redshift DataSource.
	DataSchemaUri *string

	noSmithyDocumentSerde
}
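// A minimal sketch (illustrative only; the cluster, database, credential,
// query, and bucket names are hypothetical) of assembling the required members
// of a RedshiftDataSpec:
//
//	cluster, db := "example-cluster", "exampledb"
//	user, pass := "ml_user", "example-password"
//	query, staging := "SELECT * FROM observations", "s3://examplebucket/staging/"
//	spec := &RedshiftDataSpec{
//		DatabaseInformation: &RedshiftDatabase{ClusterIdentifier: &cluster, DatabaseName: &db},
//		DatabaseCredentials: &RedshiftDatabaseCredentials{Username: &user, Password: &pass},
//		SelectSqlQuery:      &query,
//		S3StagingLocation:   &staging,
//	}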
// Describes the DataSource details specific to Amazon Redshift.
type RedshiftMetadata struct {

	// A username to be used by Amazon Machine Learning (Amazon ML) to connect to a
	// database on an Amazon Redshift cluster. The username should have sufficient
	// permissions to execute the RedshiftSelectSqlQuery query. The username should
	// be valid for an Amazon Redshift USER
	// (https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html).
	DatabaseUserName *string

	// Describes the database details required to connect to an Amazon Redshift
	// database.
	RedshiftDatabase *RedshiftDatabase

	// The SQL query that is specified during CreateDataSourceFromRedshift. Returns
	// only if Verbose is true in GetDataSourceInput.
	SelectSqlQuery *string

	noSmithyDocumentSerde
}
// Describes the data specification of a DataSource.
type S3DataSpec struct {

	// The location of the data file(s) used by a DataSource. The URI specifies a
	// data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket
	// containing data files.
	//
	// This member is required.
	DataLocationS3 *string

	// A JSON string that represents the splitting and rearrangement processing to
	// be applied to a DataSource. If the DataRearrangement parameter is not
	// provided, all of the input data is used to create the Datasource. There are
	// multiple parameters that control what data is used to create a datasource:
	//
	// * percentBegin - Use percentBegin to indicate the beginning of the range of
	// the data used to create the Datasource. If you do not include percentBegin
	// and percentEnd, Amazon ML includes all of the data when creating the
	// datasource.
	//
	// * percentEnd - Use percentEnd to indicate the end of the range of the data
	// used to create the Datasource. If you do not include percentBegin and
	// percentEnd, Amazon ML includes all of the data when creating the datasource.
	//
	// * complement - The complement parameter instructs Amazon ML to use the data
	// that is not included in the range of percentBegin to percentEnd to create a
	// datasource. The complement parameter is useful if you need to create
	// complementary datasources for training and evaluation. To create a
	// complementary datasource, use the same values for percentBegin and
	// percentEnd, along with the complement parameter. For example, the following
	// two datasources do not share any data, and can be used to train and evaluate
	// a model. The first datasource has 25 percent of the data, and the second one
	// has 75 percent of the data.
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}}
	//
	// Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25,
	// "complement":"true"}}
	//
	// * strategy - To change how Amazon ML splits the data for a datasource, use
	// the strategy parameter. The default value for the strategy parameter is
	// sequential, meaning that Amazon ML takes all of the data records between the
	// percentBegin and percentEnd parameters for the datasource, in the order that
	// the records appear in the input data. The following two DataRearrangement
	// lines are examples of sequentially ordered training and evaluation
	// datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"sequential"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"sequential", "complement":"true"}}
	//
	// To randomly split the input data into the proportions indicated by the
	// percentBegin and percentEnd parameters, set the strategy parameter to random
	// and provide a string that is used as the seed value for the random data
	// splitting (for example, you can use the S3 path to your data as the random
	// seed string). If you choose the random split strategy, Amazon ML assigns
	// each row of data a pseudo-random number between 0 and 100, and then selects
	// the rows that have an assigned number between percentBegin and percentEnd.
	// Pseudo-random numbers are assigned using both the input seed string value
	// and the byte offset as a seed, so changing the data results in a different
	// split. Any existing ordering is preserved. The random splitting strategy
	// ensures that variables in the training and evaluation data are distributed
	// similarly. It is useful in the cases where the input data may have an
	// implicit sort order, which would otherwise result in training and evaluation
	// datasources containing non-similar data records. The following two
	// DataRearrangement lines are examples of non-sequentially ordered training
	// and evaluation datasources:
	//
	// Datasource for evaluation: {"splitting":{"percentBegin":70,
	// "percentEnd":100, "strategy":"random",
	// "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
	//
	// Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100,
	// "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv",
	// "complement":"true"}}
	DataRearrangement *string

	// A JSON string that represents the schema for an Amazon S3 DataSource. The
	// DataSchema defines the structure of the observation data in the data file(s)
	// referenced in the DataSource. You must provide either the DataSchema or the
	// DataSchemaLocationS3. Define your DataSchema as a series of key-value pairs.
	// attributes and excludedVariableNames have an array of key-value pairs for
	// their value. Use the following format to define your DataSchema:
	//
	// { "version": "1.0", "recordAnnotationFieldName": "F1",
	// "recordWeightFieldName": "F2", "targetFieldName": "F3", "dataFormat": "CSV",
	// "dataFileContainsHeader": true, "attributes": [ { "fieldName": "F1",
	// "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, {
	// "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4",
	// "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" },
	// { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7",
	// "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType":
	// "WEIGHTED_STRING_SEQUENCE" } ], "excludedVariableNames": [ "F6" ] }
	DataSchema *string

	// Describes the schema location in Amazon S3. You must provide either the
	// DataSchema or the DataSchemaLocationS3.
	DataSchemaLocationS3 *string

	noSmithyDocumentSerde
}
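// A minimal sketch (illustrative only; the bucket and file names are
// hypothetical) of pointing an S3DataSpec at data and a schema stored in
// Amazon S3, per the note above that either DataSchema or DataSchemaLocationS3
// must be provided:
//
//	data := "s3://examplebucket/input/data.csv"
//	schema := "s3://examplebucket/input/data.csv.schema"
//	spec := &S3DataSpec{
//		DataLocationS3:       &data,
//		DataSchemaLocationS3: &schema,
//	}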
// A custom key-value pair associated with an ML object, such as an ML model.
type Tag struct {

	// A unique identifier for the tag. Valid characters include Unicode letters,
	// digits, white space, _, ., /, =, +, -, %, and @.
	Key *string

	// An optional string, typically used to describe or define the tag. Valid
	// characters include Unicode letters, digits, white space, _, ., /, =, +, -, %,
	// and @.
	Value *string

	noSmithyDocumentSerde
}

type noSmithyDocumentSerde = smithydocument.NoSerde