From 5673d54a18f0abc2699f577abce1b576c120bd34 Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 13 Feb 2023 10:52:02 -0600 Subject: [PATCH 01/22] Deprecate the "local" argument in CivisFuture() (#247) * DESCRIPTION: patch update to v3.0.1 + add Peter as maintainer * deprecate the "local" argument in CivisFuture() and future:Future() * rebuild documentation * DEP remove @inheritParams from fetch_output() * Update CHANGELOG.md * use `deprecate_soft` instead of `deprecate_warn` * move lifecycle dependency to Suggests * ignore all civis.Rcheck files * explicitly document "local" argument to CivisFuture() * split "x" and "newdata" documentation in civis_ml() * re-build default client * Revert "re-build default client" This reverts commit 7e4e78547f8b8e30e34973741f7e317e3ca68188. * run generate_default_client.R * run fetch_and_generate_client() * Revert "run fetch_and_generate_client()" This reverts commit 697e592e3e195c99d09138638208273ace68ebe9. * Revert "run generate_default_client.R" This reverts commit dcd569046bbc2b569e219c8f214766882aa3febf. * update documentation * change version to minor update (v3.1.0) * Update CHANGELOG.md --- .gitignore | 1 + CHANGELOG.md | 5 +++++ DESCRIPTION | 8 +++++--- R/civis_future.R | 9 +++++++-- R/civis_ml.R | 3 ++- R/scripts.R | 1 - man/CivisFuture.Rd | 19 ++++++++----------- man/await.Rd | 4 ++-- man/civis_ml.Rd | 4 +++- man/download_civis.Rd | 8 ++++---- man/fetch_output_file_ids.Rd | 4 ++-- man/query_civis.Rd | 8 ++++---- man/query_civis_file.Rd | 8 ++++---- man/read_civis.Rd | 10 +++++----- man/write_civis.Rd | 8 ++++---- man/write_civis_file.Rd | 8 ++++---- 16 files changed, 60 insertions(+), 48 deletions(-) diff --git a/.gitignore b/.gitignore index 5ea74231..516d8941 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ inst/web .*.swp .*.swo .DS_Store +civis.Rcheck/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e974bc7..89ea546b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,11 @@ ## Unreleased +## [3.0.1] + +### Changed +- Deprecated the `local` argument to the `CivisFuture()` function. The `local` argument is defunct for `future` versions > 1.31.0. 
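For context, the deprecation described above follows the usual lifecycle pattern that the R/civis_future.R hunk later in this patch applies: the argument's default becomes lifecycle::deprecated(), and a soft deprecation warning is raised only if a caller still supplies it. A minimal sketch of that pattern, using a hypothetical my_fun() rather than the package's own function:

# Sketch of the lifecycle deprecation pattern used for CivisFuture()'s
# "local" argument in this patch; my_fun() is a hypothetical stand-in.
my_fun <- function(x, local = lifecycle::deprecated()) {
  if (lifecycle::is_present(local)) {
    # warn softly that "local" is deprecated as of version 3.0.1
    lifecycle::deprecate_soft(when = "3.0.1", what = "my_fun(local)")
  }
  x
}

Callers that previously passed local = TRUE can simply drop the argument; as the diff below shows, it is no longer forwarded to future::Future() in any case.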
+ ## [3.0.0] - 2020-06-22 ### Changed diff --git a/DESCRIPTION b/DESCRIPTION index 85c52939..6a207e40 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,8 +1,9 @@ Package: civis Title: R Client for the 'Civis Platform API' -Version: 3.0.0 +Version: 3.1.0 Authors@R: c( - person("Patrick", "Miller", email = "pmiller@civisanalytics.com", role = c("cre", "aut")), + person("Peter", "Cooman", email = "pcooman@civisanalytics.com", role = c("cre", "ctb")), + person("Patrick", "Miller", email = "pmiller@civisanalytics.com", role = "aut"), person("Keith", "Ingersoll", email = "kingersoll@civisanalytics.com", role = "aut"), person("Bill", "Lattner", email = "wlattner@civisanalytics.com", role = "ctb"), person("Anh", "Le", email = "ale@civisanalytics.com", role = "ctb"), @@ -35,6 +36,7 @@ Suggests: feather, ggplot2, knitr, + lifecycle, rmarkdown, roxygen2, mockery, @@ -42,7 +44,7 @@ Suggests: rstudioapi, testthat, yaml -RoxygenNote: 7.1.0 +RoxygenNote: 7.2.2 VignetteBuilder: knitr Collate: 'await.R' diff --git a/R/civis_future.R b/R/civis_future.R index 655eb40b..6a86f8ba 100644 --- a/R/civis_future.R +++ b/R/civis_future.R @@ -49,6 +49,7 @@ class(civis_platform) <- c("CivisFuture", "future", "function") #' Evaluate an expression in Civis Platform #' @inheritParams future::Future +#' @param local deprecated as of \code{civis} v3.0.1 #' @param required_resources resources, see \code{\link{scripts_post_containers}} #' @param docker_image_name the image for the container script. #' @param docker_image_tag the tag for the Docker image. @@ -63,7 +64,7 @@ CivisFuture <- function(expr = NULL, globals = TRUE, packages = NULL, lazy = FALSE, - local = TRUE, + local = lifecycle::deprecated(), gc = FALSE, earlySignal = FALSE, label = NULL, @@ -72,6 +73,11 @@ CivisFuture <- function(expr = NULL, docker_image_tag = "latest", ...) { + if (lifecycle::is_present(local)) { + lifecycle::deprecate_soft(when = "3.0.1", + what = "civis::CivisFuture(local)") + } + gp <- future::getGlobalsAndPackages(expr, envir = envir, globals = globals) ## if there are globals, assign them in envir @@ -87,7 +93,6 @@ CivisFuture <- function(expr = NULL, globals = gp$globals, packages = unique(c(packages, gp$packages)), lazy = lazy, - local = local, gc = gc, earlySignal = earlySignal, label = label, diff --git a/R/civis_ml.R b/R/civis_ml.R index d247cff6..e7b7ae52 100644 --- a/R/civis_ml.R +++ b/R/civis_ml.R @@ -7,7 +7,8 @@ #' @param model_id The \code{id} of CivisML model built previously. #' @param run_id Optional, the \code{id} of a CivisML model run. If \code{NULL}, #' defaults to fetching the latest run. -#' @param x,newdata See the Data Sources section below. +#' @param x See the Data Sources section below. +#' @param newdata See the Data Sources section below. #' @param model_type The name of the CivisML workflow. See the Workflows section #' below. #' @param dependent_variable The dependent variable of the training dataset. diff --git a/R/scripts.R b/R/scripts.R index e19a4160..f1932ee6 100644 --- a/R/scripts.R +++ b/R/scripts.R @@ -40,7 +40,6 @@ fetch_output_file_ids <- function(x, regex = NULL) { #' Return output from a civis_script. #' @describeIn fetch_output_file_ids Return output of \code{scripts_list_*_runs_outputs} matching \code{regex}. 
-#' @inheritParams fetch_output_file_ids #' @family script_utils #' @export fetch_output <- function(x, regex = NULL) { diff --git a/man/CivisFuture.Rd b/man/CivisFuture.Rd index 5661e1a5..ce5c428a 100644 --- a/man/CivisFuture.Rd +++ b/man/CivisFuture.Rd @@ -16,7 +16,7 @@ CivisFuture( globals = TRUE, packages = NULL, lazy = FALSE, - local = TRUE, + local = lifecycle::deprecated(), gc = FALSE, earlySignal = FALSE, label = NULL, @@ -56,10 +56,7 @@ to be attached in the \R environment evaluating the future.} \item{lazy}{If FALSE (default), the future is resolved eagerly (starting immediately), otherwise not.} -\item{local}{If TRUE, the expression is evaluated such that -all assignments are done to local temporary environment, otherwise -the assignments are done to the global environment of the \R process -evaluating the future.} +\item{local}{deprecated as of \code{civis} v3.0.1} \item{gc}{If TRUE, the garbage collector run (in the process that evaluated the future) only after the value of the future is collected. @@ -93,14 +90,14 @@ Evaluate an expression in Civis Platform } \section{Methods (by generic)}{ \itemize{ -\item \code{run}: Run a CivisFuture +\item \code{run(CivisFuture)}: Run a CivisFuture -\item \code{result}: Return the value of a CivisFuture +\item \code{result(CivisFuture)}: Return the value of a CivisFuture -\item \code{cancel}: Cancel a CivisFuture +\item \code{cancel(CivisFuture)}: Cancel a CivisFuture -\item \code{resolved}: Check if a CivisFutre has resolved +\item \code{resolved(CivisFuture)}: Check if a CivisFutre has resolved -\item \code{fetch_logs}: Fetch logs from a CivisFuture -}} +\item \code{fetch_logs(CivisFuture)}: Fetch logs from a CivisFuture +}} diff --git a/man/await.Rd b/man/await.Rd index f0e5620b..13baa179 100644 --- a/man/await.Rd +++ b/man/await.Rd @@ -102,9 +102,9 @@ The polling interval can be set to a fixed value globally with } \section{Functions}{ \itemize{ -\item \code{await_all}: Call a function repeatedly for all values of a vector until all have reached a completed status -}} +\item \code{await_all()}: Call a function repeatedly for all values of a vector until all have reached a completed status +}} \examples{ \dontrun{ diff --git a/man/civis_ml.Rd b/man/civis_ml.Rd index 9a17dd11..cead903b 100644 --- a/man/civis_ml.Rd +++ b/man/civis_ml.Rd @@ -51,7 +51,7 @@ civis_ml_fetch_existing(model_id, run_id = NULL) ) } \arguments{ -\item{x, newdata}{See the Data Sources section below.} +\item{x}{See the Data Sources section below.} \item{dependent_variable}{The dependent variable of the training dataset. For a multi-target problem, this should be a vector of column names of @@ -140,6 +140,8 @@ defaults to fetching the latest run.} \item{object}{A \code{civis_ml} object.} +\item{newdata}{See the Data Sources section below.} + \item{output_table}{The table in which to put predictions.} \item{output_db}{The database containing \code{output_table}. If not diff --git a/man/download_civis.Rd b/man/download_civis.Rd index 97dacc4a..cf19450d 100644 --- a/man/download_civis.Rd +++ b/man/download_civis.Rd @@ -77,13 +77,13 @@ this database will automatically be used as the default. } \section{Methods (by class)}{ \itemize{ -\item \code{character}: Download a table from Redshift to disk as CSV. +\item \code{download_civis(character)}: Download a table from Redshift to disk as CSV. -\item \code{sql}: Download the result of a SQL query from Redshift to disk as CSV. +\item \code{download_civis(sql)}: Download the result of a SQL query from Redshift to disk as CSV. 
-\item \code{numeric}: Download a file from Platform files endpoint to disk. -}} +\item \code{download_civis(numeric)}: Download a file from Platform files endpoint to disk. +}} \examples{ \dontrun{ # Download all columns in a single table into a CSV diff --git a/man/fetch_output_file_ids.Rd b/man/fetch_output_file_ids.Rd index 43cb5913..e407727d 100644 --- a/man/fetch_output_file_ids.Rd +++ b/man/fetch_output_file_ids.Rd @@ -25,9 +25,9 @@ If the script has no outputs, the results are a list of length is 0. } \section{Functions}{ \itemize{ -\item \code{fetch_output}: Return output of \code{scripts_list_*_runs_outputs} matching \code{regex}. -}} +\item \code{fetch_output()}: Return output of \code{scripts_list_*_runs_outputs} matching \code{regex}. +}} \examples{ \dontrun{ out <- fetch_output(civis_script(1234)) diff --git a/man/query_civis.Rd b/man/query_civis.Rd index 12750206..90f3c99b 100644 --- a/man/query_civis.Rd +++ b/man/query_civis.Rd @@ -33,13 +33,13 @@ this database will automatically be used as the default. } \section{Methods (by class)}{ \itemize{ -\item \code{sql}: Run a SQL query. +\item \code{query_civis(sql)}: Run a SQL query. -\item \code{numeric}: Run a SQL query from a previous SQL query id. +\item \code{query_civis(numeric)}: Run a SQL query from a previous SQL query id. -\item \code{character}: Run a SQL query. -}} +\item \code{query_civis(character)}: Run a SQL query. +}} \examples{ \dontrun{ query_civis("GRANT ALL ON schema.my_table TO GROUP admin", "database", credential=0000) diff --git a/man/query_civis_file.Rd b/man/query_civis_file.Rd index d1dc6674..fcd577ed 100644 --- a/man/query_civis_file.Rd +++ b/man/query_civis_file.Rd @@ -57,13 +57,13 @@ which is a gzipped csv. } \section{Methods (by class)}{ \itemize{ -\item \code{character}: Export a \code{"schema.table"} to a file id. +\item \code{query_civis_file(character)}: Export a \code{"schema.table"} to a file id. -\item \code{sql}: Export results of a query to a file id. +\item \code{query_civis_file(sql)}: Export results of a query to a file id. -\item \code{numeric}: Run an existing sql script and return the file id of the results on S3. -}} +\item \code{query_civis_file(numeric)}: Run an existing sql script and return the file id of the results on S3. +}} \examples{ \dontrun{ id <- query_civis_file("schema.tablename", database = "my_database") diff --git a/man/read_civis.Rd b/man/read_civis.Rd index 9379f30d..1079ab02 100644 --- a/man/read_civis.Rd +++ b/man/read_civis.Rd @@ -69,15 +69,15 @@ If the script has no outputs, an empty list will be returned. } \section{Methods (by class)}{ \itemize{ -\item \code{numeric}: Return a file as a data frame +\item \code{read_civis(numeric)}: Return a file as a data frame -\item \code{character}: Return all columns from a table as a data frame. +\item \code{read_civis(character)}: Return all columns from a table as a data frame. -\item \code{sql}: Return a SQL query as a data frame. +\item \code{read_civis(sql)}: Return a SQL query as a data frame. -\item \code{civis_script}: Return run outputs of a \code{civis_script} as a named list. -}} +\item \code{read_civis(civis_script)}: Return run outputs of a \code{civis_script} as a named list. +}} \examples{ \dontrun{ # Read all columns in a single table diff --git a/man/write_civis.Rd b/man/write_civis.Rd index 816106e2..adef0ca0 100644 --- a/man/write_civis.Rd +++ b/man/write_civis.Rd @@ -114,13 +114,13 @@ this database will automatically be used as the default. 
} \section{Methods (by class)}{ \itemize{ -\item \code{data.frame}: Upload a data frame to Civis Platform (Redshift). +\item \code{write_civis(data.frame)}: Upload a data frame to Civis Platform (Redshift). -\item \code{character}: Upload a csv to Civis Platform (Redshift). +\item \code{write_civis(character)}: Upload a csv to Civis Platform (Redshift). -\item \code{numeric}: Upload a csv file from the files endpoint to Civis Platform (Redshift) -}} +\item \code{write_civis(numeric)}: Upload a csv file from the files endpoint to Civis Platform (Redshift) +}} \examples{ \dontrun{ df <- read.csv(local_file) diff --git a/man/write_civis_file.Rd b/man/write_civis_file.Rd index ed44346a..85afb115 100644 --- a/man/write_civis_file.Rd +++ b/man/write_civis_file.Rd @@ -58,13 +58,13 @@ to the files endpoint. If given a filepath, the file is uploaded as-is. } \section{Methods (by class)}{ \itemize{ -\item \code{default}: Serialize R object +\item \code{write_civis_file(default)}: Serialize R object -\item \code{data.frame}: Upload a data frame as a csv +\item \code{write_civis_file(data.frame)}: Upload a data frame as a csv -\item \code{character}: Upload any file -}} +\item \code{write_civis_file(character)}: Upload any file +}} \examples{ \dontrun{ data(iris) From ddf009e6834b2f12ae645a73d8b34cc14f4182cf Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 13 Feb 2023 14:25:56 -0600 Subject: [PATCH 02/22] Update default client for v3.1.0 (#249) * generate new default client * remove new API endpoints * missed some new API endpoints * re-adding queries_delete_runs() * Revert "re-adding queries_delete_runs()" This reverts commit 3e76458cf8605dd84e7780201b26088a1c2426d4. * Revert "missed some new API endpoints" This reverts commit 360524f71632d2d77dd9ecce41d8cd824aaf3e4d. * Revert "remove new API endpoints" This reverts commit e44c815525f7f6715919e64c245716bcc3288f49. * Revert "generate new default client" This reverts commit 3331f1c71d26f90f9ff9b9c2304d94ba5e91a604. 
* remove deprecated API endpoints * update links * add new generated default client --- .Rbuildignore | 2 + .gitignore | 2 + NAMESPACE | 176 +- R/civis_ml.R | 24 +- R/generated_client.R | 21799 ++++++++++------ README.md | 4 +- man/admin_list_organizations.Rd | 4 + man/aliases_get.Rd | 2 +- man/aliases_get_object_type.Rd | 4 +- man/aliases_list.Rd | 4 +- man/aliases_list_dependencies.Rd | 25 + man/aliases_patch.Rd | 4 +- man/aliases_post.Rd | 4 +- man/aliases_put.Rd | 4 +- man/aliases_put_transfer.Rd | 40 + man/apps_delete_instances_projects.Rd | 21 - man/apps_get.Rd | 30 - man/apps_get_instances.Rd | 36 - man/apps_get_releases.Rd | 24 - man/apps_list.Rd | 18 - man/apps_list_instances.Rd | 52 - man/apps_list_instances_projects.Rd | 44 - man/apps_list_releases.Rd | 39 - man/apps_patch_instances.Rd | 38 - man/apps_patch_releases.Rd | 28 - man/apps_post_instances.Rd | 36 - man/apps_post_releases.Rd | 26 - man/apps_put_features.Rd | 34 - man/apps_put_instances_archive.Rd | 38 - man/apps_put_instances_projects.Rd | 21 - man/apps_put_releases_archive.Rd | 26 - man/civis_ml.Rd | 24 +- man/clusters_get_kubernetes.Rd | 4 +- ...lusters_get_kubernetes_instance_configs.Rd | 4 +- man/clusters_get_kubernetes_partitions.Rd | 2 +- man/clusters_list_kubernetes.Rd | 13 +- man/clusters_list_kubernetes_deployments.Rd | 2 + ...netes_instance_configs_active_workloads.Rd | 40 + man/clusters_list_kubernetes_partitions.Rd | 2 +- man/clusters_patch_kubernetes.Rd | 8 +- man/clusters_patch_kubernetes_partitions.Rd | 4 +- man/clusters_post_kubernetes.Rd | 7 +- man/clusters_post_kubernetes_partitions.Rd | 4 +- man/credentials_delete.Rd | 17 + man/credentials_get.Rd | 11 +- man/credentials_list.Rd | 20 +- man/credentials_list_dependencies.Rd | 25 + man/credentials_list_types.Rd | 15 + man/credentials_patch.Rd | 69 + man/credentials_post.Rd | 23 +- man/credentials_post_authenticate.Rd | 13 +- man/credentials_put.Rd | 23 +- man/credentials_put_transfer.Rd | 40 + man/databases_delete_whitelist_ips.Rd | 19 - man/databases_get_schema_privileges.Rd | 23 + ...tabases_get_table_privilegesschema_name.Rd | 25 + man/databases_list_schemas.Rd | 6 +- man/databases_list_tables.Rd | 5 + man/databases_list_tables_search.Rd | 5 + man/databases_post_whitelist_ips.Rd | 27 - man/enhancements_get_cass_ncoa.Rd | 7 +- man/enhancements_get_cass_ncoa_runs.Rd | 5 +- man/enhancements_get_civis_data_match.Rd | 10 +- man/enhancements_get_civis_data_match_runs.Rd | 5 +- man/enhancements_get_geocode.Rd | 6 +- man/enhancements_get_geocode_runs.Rd | 5 +- man/enhancements_list.Rd | 2 +- ...nhancements_list_cass_ncoa_dependencies.Rd | 25 + man/enhancements_list_cass_ncoa_runs.Rd | 5 +- ...ents_list_civis_data_match_dependencies.Rd | 25 + ...enhancements_list_civis_data_match_runs.Rd | 5 +- man/enhancements_list_geocode_dependencies.Rd | 25 + man/enhancements_list_geocode_runs.Rd | 5 +- man/enhancements_patch_cass_ncoa.Rd | 17 +- man/enhancements_patch_civis_data_match.Rd | 19 +- man/enhancements_patch_geocode.Rd | 11 +- man/enhancements_post_cass_ncoa.Rd | 17 +- man/enhancements_post_cass_ncoa_runs.Rd | 5 +- man/enhancements_post_civis_data_match.Rd | 19 +- ...nhancements_post_civis_data_match_clone.Rd | 10 +- ...enhancements_post_civis_data_match_runs.Rd | 5 +- man/enhancements_post_geocode.Rd | 11 +- man/enhancements_post_geocode_runs.Rd | 5 +- man/enhancements_put_cass_ncoa.Rd | 17 +- man/enhancements_put_cass_ncoa_archive.Rd | 7 +- man/enhancements_put_cass_ncoa_transfer.Rd | 40 + man/enhancements_put_civis_data_match.Rd | 19 +- 
...hancements_put_civis_data_match_archive.Rd | 10 +- ...ancements_put_civis_data_match_transfer.Rd | 40 + man/enhancements_put_geocode.Rd | 11 +- man/enhancements_put_geocode_archive.Rd | 6 +- man/enhancements_put_geocode_transfer.Rd | 40 + man/exports_delete_files_csv_runs.Rd | 19 + man/exports_get_files_csv.Rd | 1 + man/exports_get_files_csv_runs.Rd | 26 + man/exports_list.Rd | 6 +- man/exports_list_files_csv_runs.Rd | 37 + man/exports_list_files_csv_runs_logs.Rd | 27 + man/exports_list_files_csv_runs_outputs.Rd | 39 + man/exports_patch_files_csv.Rd | 1 + man/exports_post_files_csv.Rd | 1 + man/exports_post_files_csv_runs.Rd | 24 + man/exports_put_files_csv.Rd | 1 + man/exports_put_files_csv_archive.Rd | 1 + man/feature_flags_delete_organizations.Rd | 19 + man/feature_flags_delete_users.Rd | 2 +- man/feature_flags_get.Rd | 5 +- man/feature_flags_list.Rd | 6 +- man/feature_flags_put_groups.Rd | 6 +- man/feature_flags_put_organizations.Rd | 29 + man/feature_flags_put_users.Rd | 8 +- man/files_get.Rd | 1 + man/files_list_dependencies.Rd | 25 + man/files_patch.Rd | 1 + man/files_post.Rd | 1 + man/files_put.Rd | 1 + man/files_put_transfer.Rd | 40 + man/git_repos_list_refs.Rd | 19 + man/groups_get.Rd | 16 +- man/groups_list.Rd | 15 +- man/groups_list_child_groups.Rd | 32 + man/groups_patch.Rd | 25 +- man/groups_post.Rd | 25 +- man/groups_put.Rd | 25 +- man/groups_put_members.Rd | 16 +- man/imports_delete_files_csv_runs.Rd | 19 + man/imports_get.Rd | 6 +- man/imports_get_files_csv.Rd | 1 + man/imports_get_files_csv_runs.Rd | 27 + man/imports_get_files_runs.Rd | 5 +- man/imports_list.Rd | 11 +- man/imports_list_dependencies.Rd | 25 + man/imports_list_files_csv_runs.Rd | 39 + man/imports_list_files_csv_runs_logs.Rd | 27 + man/imports_list_files_runs.Rd | 5 +- man/imports_patch_files_csv.Rd | 1 + man/imports_post.Rd | 11 +- man/imports_post_files_csv.Rd | 1 + man/imports_post_files_csv_runs.Rd | 25 + man/imports_post_files_runs.Rd | 5 +- man/imports_post_syncs.Rd | 4 +- man/imports_put.Rd | 11 +- man/imports_put_archive.Rd | 6 +- man/imports_put_files_csv.Rd | 1 + man/imports_put_files_csv_archive.Rd | 1 + man/imports_put_syncs.Rd | 4 +- man/imports_put_syncs_archive.Rd | 2 +- man/imports_put_transfer.Rd | 40 + man/jobs_get.Rd | 14 +- man/jobs_list.Rd | 16 +- man/jobs_list_dependencies.Rd | 25 + man/jobs_list_parents.Rd | 14 +- man/jobs_list_workflows.Rd | 5 +- man/jobs_put_archive.Rd | 14 +- man/jobs_put_transfer.Rd | 40 + man/json_values_list_dependencies.Rd | 25 + man/json_values_put_transfer.Rd | 40 + man/media_get_optimizations_runs.Rd | 5 +- man/media_list_optimizations_runs.Rd | 5 +- man/media_post_optimizations_runs.Rd | 5 +- man/models_get.Rd | 5 +- man/models_list.Rd | 11 +- man/models_list_dependencies.Rd | 25 + man/models_list_schedules.Rd | 5 +- man/models_put_archive.Rd | 5 +- man/models_put_transfer.Rd | 40 + man/notebooks_get.Rd | 9 +- man/notebooks_get_deployments.Rd | 7 +- man/notebooks_get_git_commits.Rd | 4 +- man/notebooks_list.Rd | 9 +- man/notebooks_list_dependencies.Rd | 25 + man/notebooks_list_deployments.Rd | 7 +- man/notebooks_list_git.Rd | 5 +- man/notebooks_list_git_commits.Rd | 4 +- man/notebooks_patch.Rd | 12 +- man/notebooks_patch_git.Rd | 49 + man/notebooks_post.Rd | 12 +- man/notebooks_post_clone.Rd | 9 +- man/notebooks_post_deployments.Rd | 11 +- man/notebooks_post_git_checkout.Rd | 21 + man/notebooks_post_git_checkout_latest.Rd | 21 + man/notebooks_put.Rd | 12 +- man/notebooks_put_archive.Rd | 9 +- man/notebooks_put_git.Rd | 8 +- man/notebooks_put_transfer.Rd | 
40 + man/permission_sets_delete_resources.Rd | 19 + ...ion_sets_delete_resources_shares_groups.Rd | 21 + ...sion_sets_delete_resources_shares_users.Rd | 21 + ...> permission_sets_delete_shares_groups.Rd} | 8 +- ...=> permission_sets_delete_shares_users.Rd} | 8 +- man/permission_sets_get.Rd | 31 + man/permission_sets_get_resources.Rd | 24 + man/permission_sets_list.Rd | 48 + man/permission_sets_list_dependencies.Rd | 25 + man/permission_sets_list_resources.Rd | 36 + man/permission_sets_list_resources_shares.Rd | 36 + ...ares.Rd => permission_sets_list_shares.Rd} | 8 +- man/permission_sets_list_users_permissions.Rd | 23 + man/permission_sets_patch.Rd | 35 + man/permission_sets_patch_resources.Rd | 26 + man/permission_sets_post.Rd | 33 + man/permission_sets_post_resources.Rd | 26 + man/permission_sets_put.Rd | 35 + man/permission_sets_put_archive.Rd | 33 + ...ission_sets_put_resources_shares_groups.Rd | 51 + ...mission_sets_put_resources_shares_users.Rd | 51 + ...d => permission_sets_put_shares_groups.Rd} | 9 +- ...Rd => permission_sets_put_shares_users.Rd} | 9 +- man/permission_sets_put_transfer.Rd | 40 + man/predictions_get.Rd | 5 +- man/predictions_list_schedules.Rd | 5 +- man/projects_get.Rd | 12 +- man/projects_list.Rd | 9 +- man/projects_list_dependencies.Rd | 25 + man/projects_post.Rd | 16 +- man/projects_post_clone.Rd | 192 + man/projects_put.Rd | 22 +- man/projects_put_archive.Rd | 12 +- man/projects_put_auto_share.Rd | 190 + man/projects_put_transfer.Rd | 40 + man/queries_delete.Rd | 44 + man/queries_get.Rd | 5 +- man/queries_get_runs.Rd | 5 +- man/queries_list.Rd | 32 +- man/queries_list_runs.Rd | 5 +- man/queries_post.Rd | 6 +- man/queries_post_runs.Rd | 5 +- man/queries_put_scripts.Rd | 5 +- ...d => remote_hosts_delete_shares_groups.Rd} | 8 +- ...Rd => remote_hosts_delete_shares_users.Rd} | 8 +- man/remote_hosts_get.Rd | 33 + man/remote_hosts_list.Rd | 12 +- ..._shares.Rd => remote_hosts_list_shares.Rd} | 8 +- man/remote_hosts_patch.Rd | 47 + man/remote_hosts_post.Rd | 26 +- man/remote_hosts_put.Rd | 41 + ...s.Rd => remote_hosts_put_shares_groups.Rd} | 9 +- ...rs.Rd => remote_hosts_put_shares_users.Rd} | 9 +- man/reports_get.Rd | 1 + man/reports_get_git_commits.Rd | 4 +- man/reports_get_services.Rd | 2 + man/reports_list.Rd | 6 +- man/reports_list_dependencies.Rd | 25 + man/reports_list_git.Rd | 5 +- man/reports_list_git_commits.Rd | 4 +- man/reports_list_services_dependencies.Rd | 25 + man/reports_patch.Rd | 1 + man/reports_patch_git.Rd | 49 + man/reports_patch_services.Rd | 2 + man/reports_post.Rd | 1 + man/reports_post_git_checkout.Rd | 21 + man/reports_post_git_checkout_latest.Rd | 21 + man/reports_post_grants.Rd | 1 + man/reports_post_services.Rd | 2 + man/reports_put_archive.Rd | 1 + man/reports_put_git.Rd | 8 +- man/reports_put_services_archive.Rd | 39 + man/reports_put_services_transfer.Rd | 40 + man/reports_put_transfer.Rd | 40 + man/roles_list.Rd | 27 + man/scripts_get.Rd | 6 +- man/scripts_get_containers.Rd | 18 +- man/scripts_get_containers_runs.Rd | 7 +- man/scripts_get_custom.Rd | 20 +- man/scripts_get_custom_runs.Rd | 7 +- man/scripts_get_javascript.Rd | 7 +- man/scripts_get_javascript_git_commits.Rd | 4 +- man/scripts_get_javascript_runs.Rd | 5 +- man/scripts_get_python3.Rd | 8 +- man/scripts_get_python3_git_commits.Rd | 4 +- man/scripts_get_python3_runs.Rd | 7 +- man/scripts_get_r.Rd | 8 +- man/scripts_get_r_git_commits.Rd | 4 +- man/scripts_get_r_runs.Rd | 7 +- man/scripts_get_sql.Rd | 7 +- man/scripts_get_sql_git_commits.Rd | 4 +- man/scripts_get_sql_runs.Rd | 
14 +- man/scripts_list.Rd | 2 +- man/scripts_list_containers_dependencies.Rd | 25 + man/scripts_list_containers_runs.Rd | 7 +- man/scripts_list_custom.Rd | 2 +- man/scripts_list_custom_dependencies.Rd | 25 + man/scripts_list_custom_runs.Rd | 7 +- man/scripts_list_javascript_dependencies.Rd | 25 + man/scripts_list_javascript_git.Rd | 5 +- man/scripts_list_javascript_git_commits.Rd | 4 +- man/scripts_list_javascript_runs.Rd | 5 +- man/scripts_list_python3_dependencies.Rd | 25 + man/scripts_list_python3_git.Rd | 5 +- man/scripts_list_python3_git_commits.Rd | 4 +- man/scripts_list_python3_runs.Rd | 7 +- man/scripts_list_r_dependencies.Rd | 25 + man/scripts_list_r_git.Rd | 5 +- man/scripts_list_r_git_commits.Rd | 4 +- man/scripts_list_r_runs.Rd | 7 +- man/scripts_list_sql_dependencies.Rd | 25 + man/scripts_list_sql_git.Rd | 5 +- man/scripts_list_sql_git_commits.Rd | 4 +- man/scripts_list_sql_runs.Rd | 14 +- man/scripts_patch.Rd | 16 +- man/scripts_patch_container_runs.Rd | 21 + man/scripts_patch_containers.Rd | 36 +- man/scripts_patch_custom.Rd | 41 +- man/scripts_patch_javascript.Rd | 17 +- man/scripts_patch_javascript_git.Rd | 49 + man/scripts_patch_javascript_runs.Rd | 21 + man/scripts_patch_python3.Rd | 21 +- man/scripts_patch_python3_git.Rd | 49 + man/scripts_patch_python3_runs.Rd | 21 + man/scripts_patch_r.Rd | 21 +- man/scripts_patch_r_git.Rd | 49 + man/scripts_patch_r_runs.Rd | 21 + man/scripts_patch_sql.Rd | 17 +- man/scripts_patch_sql_git.Rd | 49 + man/scripts_patch_sql_runs.Rd | 21 + man/scripts_post.Rd | 6 +- man/scripts_post_containers.Rd | 36 +- man/scripts_post_containers_clone.Rd | 18 +- man/scripts_post_containers_runs.Rd | 7 +- man/scripts_post_custom.Rd | 41 +- man/scripts_post_custom_clone.Rd | 20 +- man/scripts_post_custom_runs.Rd | 7 +- man/scripts_post_javascript.Rd | 17 +- man/scripts_post_javascript_clone.Rd | 7 +- man/scripts_post_javascript_git_checkout.Rd | 21 + ...pts_post_javascript_git_checkout_latest.Rd | 21 + man/scripts_post_javascript_runs.Rd | 5 +- man/scripts_post_python3.Rd | 21 +- man/scripts_post_python3_clone.Rd | 8 +- man/scripts_post_python3_git_checkout.Rd | 21 + ...cripts_post_python3_git_checkout_latest.Rd | 21 + man/scripts_post_python3_runs.Rd | 7 +- man/scripts_post_r.Rd | 21 +- man/scripts_post_r_clone.Rd | 8 +- man/scripts_post_r_git_checkout.Rd | 21 + man/scripts_post_r_git_checkout_latest.Rd | 21 + man/scripts_post_r_runs.Rd | 7 +- man/scripts_post_sql.Rd | 17 +- man/scripts_post_sql_clone.Rd | 7 +- man/scripts_post_sql_git_checkout.Rd | 21 + man/scripts_post_sql_git_checkout_latest.Rd | 21 + man/scripts_post_sql_runs.Rd | 14 +- man/scripts_put_containers.Rd | 36 +- man/scripts_put_containers_archive.Rd | 18 +- man/scripts_put_containers_transfer.Rd | 40 + man/scripts_put_custom.Rd | 41 +- man/scripts_put_custom_archive.Rd | 20 +- man/scripts_put_custom_transfer.Rd | 40 + man/scripts_put_javascript.Rd | 17 +- man/scripts_put_javascript_archive.Rd | 7 +- man/scripts_put_javascript_git.Rd | 8 +- man/scripts_put_javascript_transfer.Rd | 40 + man/scripts_put_python3.Rd | 21 +- man/scripts_put_python3_archive.Rd | 8 +- man/scripts_put_python3_git.Rd | 8 +- man/scripts_put_python3_transfer.Rd | 40 + man/scripts_put_r.Rd | 21 +- man/scripts_put_r_archive.Rd | 8 +- man/scripts_put_r_git.Rd | 8 +- man/scripts_put_r_transfer.Rd | 40 + man/scripts_put_sql.Rd | 17 +- man/scripts_put_sql_archive.Rd | 7 +- man/scripts_put_sql_git.Rd | 8 +- man/scripts_put_sql_transfer.Rd | 40 + man/search_list.Rd | 1 + man/search_list_queries.Rd | 68 + man/services_get.Rd 
| 30 +- man/services_get_deployments.Rd | 7 +- man/services_list.Rd | 27 +- man/services_list_dependencies.Rd | 25 + man/services_list_deployments.Rd | 7 +- man/services_list_tokens.Rd | 1 + man/services_patch.Rd | 38 +- man/services_post.Rd | 36 +- man/services_post_clone.Rd | 30 +- man/services_post_deployments.Rd | 11 +- man/services_post_redeploy.Rd | 11 +- man/services_post_tokens.Rd | 5 +- man/services_put.Rd | 38 +- man/services_put_archive.Rd | 30 +- man/services_put_transfer.Rd | 40 + man/storage_hosts_list_dependencies.Rd | 25 + man/storage_hosts_put_transfer.Rd | 40 + man/table_tags_delete.Rd | 17 + man/table_tags_get.Rd | 30 + man/table_tags_list.Rd | 42 + man/table_tags_post.Rd | 30 + man/tables_delete_tags.Rd | 19 + man/tables_get.Rd | 11 +- man/tables_get_enhancements_cass_ncoa.Rd | 1 + man/tables_list.Rd | 13 +- man/tables_list_columns.Rd | 6 +- man/tables_patch.Rd | 5 + man/tables_post_enhancements_cass_ncoa.Rd | 6 +- man/tables_post_refresh.Rd | 11 +- man/tables_put_tags.Rd | 21 + man/templates_get_reports.Rd | 2 +- man/templates_get_scripts.Rd | 20 + man/templates_list_reports.Rd | 5 +- man/templates_list_reports_dependencies.Rd | 25 + man/templates_list_scripts.Rd | 11 + man/templates_list_scripts_dependencies.Rd | 25 + man/templates_patch_reports.Rd | 2 +- man/templates_patch_scripts.Rd | 20 + man/templates_post_reports.Rd | 2 +- man/templates_post_reports_review.Rd | 2 +- man/templates_post_scripts.Rd | 20 + man/templates_post_scripts_review.Rd | 20 + man/templates_put_reports.Rd | 2 +- man/templates_put_reports_transfer.Rd | 40 + man/templates_put_scripts.Rd | 20 + man/templates_put_scripts_transfer.Rd | 40 + man/users_delete_2fa.Rd | 59 + man/users_delete_me_superadmin.Rd | 45 + man/users_delete_sessions.Rd | 59 + man/users_get.Rd | 15 +- man/users_get_me_themes.Rd | 28 + man/users_list.Rd | 22 +- man/users_list_me.Rd | 8 +- man/users_list_me_themes.Rd | 18 + man/users_list_me_ui.Rd | 4 +- man/users_patch.Rd | 26 +- man/users_patch_me.Rd | 25 +- man/users_post.Rd | 21 +- man/users_post_me_superadmin.Rd | 45 + man/users_post_unsuspend.Rd | 20 + man/workflows_get.Rd | 6 +- man/workflows_get_executions_tasks.Rd | 2 + man/workflows_get_git_commits.Rd | 4 +- man/workflows_list.Rd | 7 +- man/workflows_list_dependencies.Rd | 25 + man/workflows_list_git.Rd | 5 +- man/workflows_list_git_commits.Rd | 4 +- man/workflows_patch.Rd | 11 +- man/workflows_patch_git.Rd | 49 + man/workflows_post.Rd | 11 +- man/workflows_post_clone.Rd | 6 +- man/workflows_post_executions.Rd | 2 +- man/workflows_post_git_checkout.Rd | 21 + man/workflows_post_git_checkout_latest.Rd | 21 + man/workflows_put.Rd | 11 +- man/workflows_put_archive.Rd | 6 +- man/workflows_put_git.Rd | 8 +- man/workflows_put_transfer.Rd | 40 + 444 files changed, 20080 insertions(+), 9966 deletions(-) create mode 100644 man/aliases_list_dependencies.Rd create mode 100644 man/aliases_put_transfer.Rd delete mode 100644 man/apps_delete_instances_projects.Rd delete mode 100644 man/apps_get.Rd delete mode 100644 man/apps_get_instances.Rd delete mode 100644 man/apps_get_releases.Rd delete mode 100644 man/apps_list.Rd delete mode 100644 man/apps_list_instances.Rd delete mode 100644 man/apps_list_instances_projects.Rd delete mode 100644 man/apps_list_releases.Rd delete mode 100644 man/apps_patch_instances.Rd delete mode 100644 man/apps_patch_releases.Rd delete mode 100644 man/apps_post_instances.Rd delete mode 100644 man/apps_post_releases.Rd delete mode 100644 man/apps_put_features.Rd delete mode 100644 
man/apps_put_instances_archive.Rd delete mode 100644 man/apps_put_instances_projects.Rd delete mode 100644 man/apps_put_releases_archive.Rd create mode 100644 man/clusters_list_kubernetes_instance_configs_active_workloads.Rd create mode 100644 man/credentials_delete.Rd create mode 100644 man/credentials_list_dependencies.Rd create mode 100644 man/credentials_list_types.Rd create mode 100644 man/credentials_patch.Rd create mode 100644 man/credentials_put_transfer.Rd delete mode 100644 man/databases_delete_whitelist_ips.Rd create mode 100644 man/databases_get_schema_privileges.Rd create mode 100644 man/databases_get_table_privilegesschema_name.Rd delete mode 100644 man/databases_post_whitelist_ips.Rd create mode 100644 man/enhancements_list_cass_ncoa_dependencies.Rd create mode 100644 man/enhancements_list_civis_data_match_dependencies.Rd create mode 100644 man/enhancements_list_geocode_dependencies.Rd create mode 100644 man/enhancements_put_cass_ncoa_transfer.Rd create mode 100644 man/enhancements_put_civis_data_match_transfer.Rd create mode 100644 man/enhancements_put_geocode_transfer.Rd create mode 100644 man/exports_delete_files_csv_runs.Rd create mode 100644 man/exports_get_files_csv_runs.Rd create mode 100644 man/exports_list_files_csv_runs.Rd create mode 100644 man/exports_list_files_csv_runs_logs.Rd create mode 100644 man/exports_list_files_csv_runs_outputs.Rd create mode 100644 man/exports_post_files_csv_runs.Rd create mode 100644 man/feature_flags_delete_organizations.Rd create mode 100644 man/feature_flags_put_organizations.Rd create mode 100644 man/files_list_dependencies.Rd create mode 100644 man/files_put_transfer.Rd create mode 100644 man/git_repos_list_refs.Rd create mode 100644 man/groups_list_child_groups.Rd create mode 100644 man/imports_delete_files_csv_runs.Rd create mode 100644 man/imports_get_files_csv_runs.Rd create mode 100644 man/imports_list_dependencies.Rd create mode 100644 man/imports_list_files_csv_runs.Rd create mode 100644 man/imports_list_files_csv_runs_logs.Rd create mode 100644 man/imports_post_files_csv_runs.Rd create mode 100644 man/imports_put_transfer.Rd create mode 100644 man/jobs_list_dependencies.Rd create mode 100644 man/jobs_put_transfer.Rd create mode 100644 man/json_values_list_dependencies.Rd create mode 100644 man/json_values_put_transfer.Rd create mode 100644 man/models_list_dependencies.Rd create mode 100644 man/models_put_transfer.Rd create mode 100644 man/notebooks_list_dependencies.Rd create mode 100644 man/notebooks_patch_git.Rd create mode 100644 man/notebooks_post_git_checkout.Rd create mode 100644 man/notebooks_post_git_checkout_latest.Rd create mode 100644 man/notebooks_put_transfer.Rd create mode 100644 man/permission_sets_delete_resources.Rd create mode 100644 man/permission_sets_delete_resources_shares_groups.Rd create mode 100644 man/permission_sets_delete_resources_shares_users.Rd rename man/{apps_delete_releases_shares_groups.Rd => permission_sets_delete_shares_groups.Rd} (66%) rename man/{apps_delete_releases_shares_users.Rd => permission_sets_delete_shares_users.Rd} (66%) create mode 100644 man/permission_sets_get.Rd create mode 100644 man/permission_sets_get_resources.Rd create mode 100644 man/permission_sets_list.Rd create mode 100644 man/permission_sets_list_dependencies.Rd create mode 100644 man/permission_sets_list_resources.Rd create mode 100644 man/permission_sets_list_resources_shares.Rd rename man/{apps_list_releases_shares.Rd => permission_sets_list_shares.Rd} (85%) create mode 100644 
man/permission_sets_list_users_permissions.Rd create mode 100644 man/permission_sets_patch.Rd create mode 100644 man/permission_sets_patch_resources.Rd create mode 100644 man/permission_sets_post.Rd create mode 100644 man/permission_sets_post_resources.Rd create mode 100644 man/permission_sets_put.Rd create mode 100644 man/permission_sets_put_archive.Rd create mode 100644 man/permission_sets_put_resources_shares_groups.Rd create mode 100644 man/permission_sets_put_resources_shares_users.Rd rename man/{apps_put_releases_shares_groups.Rd => permission_sets_put_shares_groups.Rd} (88%) rename man/{apps_put_releases_shares_users.Rd => permission_sets_put_shares_users.Rd} (88%) create mode 100644 man/permission_sets_put_transfer.Rd create mode 100644 man/projects_list_dependencies.Rd create mode 100644 man/projects_post_clone.Rd create mode 100644 man/projects_put_auto_share.Rd create mode 100644 man/projects_put_transfer.Rd create mode 100644 man/queries_delete.Rd rename man/{apps_delete_instances_shares_groups.Rd => remote_hosts_delete_shares_groups.Rd} (66%) rename man/{apps_delete_instances_shares_users.Rd => remote_hosts_delete_shares_users.Rd} (66%) create mode 100644 man/remote_hosts_get.Rd rename man/{apps_list_instances_shares.Rd => remote_hosts_list_shares.Rd} (85%) create mode 100644 man/remote_hosts_patch.Rd create mode 100644 man/remote_hosts_put.Rd rename man/{apps_put_instances_shares_groups.Rd => remote_hosts_put_shares_groups.Rd} (88%) rename man/{apps_put_instances_shares_users.Rd => remote_hosts_put_shares_users.Rd} (88%) create mode 100644 man/reports_list_dependencies.Rd create mode 100644 man/reports_list_services_dependencies.Rd create mode 100644 man/reports_patch_git.Rd create mode 100644 man/reports_post_git_checkout.Rd create mode 100644 man/reports_post_git_checkout_latest.Rd create mode 100644 man/reports_put_services_archive.Rd create mode 100644 man/reports_put_services_transfer.Rd create mode 100644 man/reports_put_transfer.Rd create mode 100644 man/roles_list.Rd create mode 100644 man/scripts_list_containers_dependencies.Rd create mode 100644 man/scripts_list_custom_dependencies.Rd create mode 100644 man/scripts_list_javascript_dependencies.Rd create mode 100644 man/scripts_list_python3_dependencies.Rd create mode 100644 man/scripts_list_r_dependencies.Rd create mode 100644 man/scripts_list_sql_dependencies.Rd create mode 100644 man/scripts_patch_container_runs.Rd create mode 100644 man/scripts_patch_javascript_git.Rd create mode 100644 man/scripts_patch_javascript_runs.Rd create mode 100644 man/scripts_patch_python3_git.Rd create mode 100644 man/scripts_patch_python3_runs.Rd create mode 100644 man/scripts_patch_r_git.Rd create mode 100644 man/scripts_patch_r_runs.Rd create mode 100644 man/scripts_patch_sql_git.Rd create mode 100644 man/scripts_patch_sql_runs.Rd create mode 100644 man/scripts_post_javascript_git_checkout.Rd create mode 100644 man/scripts_post_javascript_git_checkout_latest.Rd create mode 100644 man/scripts_post_python3_git_checkout.Rd create mode 100644 man/scripts_post_python3_git_checkout_latest.Rd create mode 100644 man/scripts_post_r_git_checkout.Rd create mode 100644 man/scripts_post_r_git_checkout_latest.Rd create mode 100644 man/scripts_post_sql_git_checkout.Rd create mode 100644 man/scripts_post_sql_git_checkout_latest.Rd create mode 100644 man/scripts_put_containers_transfer.Rd create mode 100644 man/scripts_put_custom_transfer.Rd create mode 100644 man/scripts_put_javascript_transfer.Rd create mode 100644 
man/scripts_put_python3_transfer.Rd create mode 100644 man/scripts_put_r_transfer.Rd create mode 100644 man/scripts_put_sql_transfer.Rd create mode 100644 man/search_list_queries.Rd create mode 100644 man/services_list_dependencies.Rd create mode 100644 man/services_put_transfer.Rd create mode 100644 man/storage_hosts_list_dependencies.Rd create mode 100644 man/storage_hosts_put_transfer.Rd create mode 100644 man/table_tags_delete.Rd create mode 100644 man/table_tags_get.Rd create mode 100644 man/table_tags_list.Rd create mode 100644 man/table_tags_post.Rd create mode 100644 man/tables_delete_tags.Rd create mode 100644 man/tables_put_tags.Rd create mode 100644 man/templates_list_reports_dependencies.Rd create mode 100644 man/templates_list_scripts_dependencies.Rd create mode 100644 man/templates_put_reports_transfer.Rd create mode 100644 man/templates_put_scripts_transfer.Rd create mode 100644 man/users_delete_2fa.Rd create mode 100644 man/users_delete_me_superadmin.Rd create mode 100644 man/users_delete_sessions.Rd create mode 100644 man/users_get_me_themes.Rd create mode 100644 man/users_list_me_themes.Rd create mode 100644 man/users_post_me_superadmin.Rd create mode 100644 man/users_post_unsuspend.Rd create mode 100644 man/workflows_list_dependencies.Rd create mode 100644 man/workflows_patch_git.Rd create mode 100644 man/workflows_post_git_checkout.Rd create mode 100644 man/workflows_post_git_checkout_latest.Rd create mode 100644 man/workflows_put_transfer.Rd diff --git a/.Rbuildignore b/.Rbuildignore index 9257b600..1faae671 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -10,3 +10,5 @@ ^_pkgdown\.yaml$ ^CHANGELOG\.md$ ^cran-comments\.md$ +^doc$ +^Meta$ diff --git a/.gitignore b/.gitignore index 516d8941..214a6450 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ inst/web .*.swo .DS_Store civis.Rcheck/ +/doc/ +/Meta/ diff --git a/NAMESPACE b/NAMESPACE index c997dbb7..e308e924 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -64,39 +64,15 @@ export(aliases_delete_shares_users) export(aliases_get) export(aliases_get_object_type) export(aliases_list) +export(aliases_list_dependencies) export(aliases_list_shares) export(aliases_patch) export(aliases_post) export(aliases_put) export(aliases_put_shares_groups) export(aliases_put_shares_users) +export(aliases_put_transfer) export(announcements_list) -export(apps_delete_instances_projects) -export(apps_delete_instances_shares_groups) -export(apps_delete_instances_shares_users) -export(apps_delete_releases_shares_groups) -export(apps_delete_releases_shares_users) -export(apps_get) -export(apps_get_instances) -export(apps_get_releases) -export(apps_list) -export(apps_list_instances) -export(apps_list_instances_projects) -export(apps_list_instances_shares) -export(apps_list_releases) -export(apps_list_releases_shares) -export(apps_patch_instances) -export(apps_patch_releases) -export(apps_post_instances) -export(apps_post_releases) -export(apps_put_features) -export(apps_put_instances_archive) -export(apps_put_instances_projects) -export(apps_put_instances_shares_groups) -export(apps_put_instances_shares_users) -export(apps_put_releases_archive) -export(apps_put_releases_shares_groups) -export(apps_put_releases_shares_users) export(await) export(await_all) export(cancel) @@ -124,6 +100,7 @@ export(clusters_get_kubernetes_partitions) export(clusters_list_kubernetes) export(clusters_list_kubernetes_deployment_stats) export(clusters_list_kubernetes_deployments) +export(clusters_list_kubernetes_instance_configs_active_workloads) 
export(clusters_list_kubernetes_instance_configs_historical_graphs) export(clusters_list_kubernetes_instance_configs_user_statistics) export(clusters_list_kubernetes_partitions) @@ -131,19 +108,25 @@ export(clusters_patch_kubernetes) export(clusters_patch_kubernetes_partitions) export(clusters_post_kubernetes) export(clusters_post_kubernetes_partitions) +export(credentials_delete) export(credentials_delete_shares_groups) export(credentials_delete_shares_users) export(credentials_get) export(credentials_list) +export(credentials_list_dependencies) export(credentials_list_shares) +export(credentials_list_types) +export(credentials_patch) export(credentials_post) export(credentials_post_authenticate) export(credentials_post_temporary) export(credentials_put) export(credentials_put_shares_groups) export(credentials_put_shares_users) -export(databases_delete_whitelist_ips) +export(credentials_put_transfer) export(databases_get) +export(databases_get_schema_privileges) +export(databases_get_table_privilegesschema_name) export(databases_get_whitelist_ips) export(databases_list) export(databases_list_advanced_settings) @@ -153,7 +136,6 @@ export(databases_list_tables_search) export(databases_list_whitelist_ips) export(databases_patch_advanced_settings) export(databases_post_schemas_scan) -export(databases_post_whitelist_ips) export(databases_put_advanced_settings) export(default_credential) export(download_civis) @@ -177,17 +159,20 @@ export(enhancements_get_civis_data_match_runs) export(enhancements_get_geocode) export(enhancements_get_geocode_runs) export(enhancements_list) +export(enhancements_list_cass_ncoa_dependencies) export(enhancements_list_cass_ncoa_projects) export(enhancements_list_cass_ncoa_runs) export(enhancements_list_cass_ncoa_runs_logs) export(enhancements_list_cass_ncoa_runs_outputs) export(enhancements_list_cass_ncoa_shares) +export(enhancements_list_civis_data_match_dependencies) export(enhancements_list_civis_data_match_projects) export(enhancements_list_civis_data_match_runs) export(enhancements_list_civis_data_match_runs_logs) export(enhancements_list_civis_data_match_runs_outputs) export(enhancements_list_civis_data_match_shares) export(enhancements_list_field_mapping) +export(enhancements_list_geocode_dependencies) export(enhancements_list_geocode_projects) export(enhancements_list_geocode_runs) export(enhancements_list_geocode_runs_logs) @@ -212,27 +197,38 @@ export(enhancements_put_cass_ncoa_archive) export(enhancements_put_cass_ncoa_projects) export(enhancements_put_cass_ncoa_shares_groups) export(enhancements_put_cass_ncoa_shares_users) +export(enhancements_put_cass_ncoa_transfer) export(enhancements_put_civis_data_match) export(enhancements_put_civis_data_match_archive) export(enhancements_put_civis_data_match_projects) export(enhancements_put_civis_data_match_shares_groups) export(enhancements_put_civis_data_match_shares_users) +export(enhancements_put_civis_data_match_transfer) export(enhancements_put_geocode) export(enhancements_put_geocode_archive) export(enhancements_put_geocode_projects) export(enhancements_put_geocode_shares_groups) export(enhancements_put_geocode_shares_users) +export(enhancements_put_geocode_transfer) +export(exports_delete_files_csv_runs) export(exports_get_files_csv) +export(exports_get_files_csv_runs) export(exports_list) +export(exports_list_files_csv_runs) +export(exports_list_files_csv_runs_logs) +export(exports_list_files_csv_runs_outputs) export(exports_patch_files_csv) export(exports_post_files_csv) 
+export(exports_post_files_csv_runs) export(exports_put_files_csv) export(exports_put_files_csv_archive) export(feature_flags_delete_groups) +export(feature_flags_delete_organizations) export(feature_flags_delete_users) export(feature_flags_get) export(feature_flags_list) export(feature_flags_put_groups) +export(feature_flags_put_organizations) export(feature_flags_put_users) export(fetch_all) export(fetch_logs) @@ -247,6 +243,7 @@ export(files_delete_shares_groups) export(files_delete_shares_users) export(files_get) export(files_get_preprocess_csv) +export(files_list_dependencies) export(files_list_projects) export(files_list_shares) export(files_patch) @@ -261,6 +258,7 @@ export(files_put_preprocess_csv_archive) export(files_put_projects) export(files_put_shares_groups) export(files_put_shares_users) +export(files_put_transfer) export(get_database_id) export(get_error) export(get_feature_importance) @@ -270,6 +268,7 @@ export(get_table_id) export(git_repos_delete) export(git_repos_get) export(git_repos_list) +export(git_repos_list_refs) export(git_repos_post) export(groups_delete) export(groups_delete_members) @@ -277,6 +276,7 @@ export(groups_delete_shares_groups) export(groups_delete_shares_users) export(groups_get) export(groups_list) +export(groups_list_child_groups) export(groups_list_shares) export(groups_patch) export(groups_post) @@ -285,6 +285,7 @@ export(groups_put_members) export(groups_put_shares_groups) export(groups_put_shares_users) export(imports_delete_files_csv) +export(imports_delete_files_csv_runs) export(imports_delete_files_runs) export(imports_delete_projects) export(imports_delete_shares_groups) @@ -293,9 +294,13 @@ export(imports_delete_syncs) export(imports_get) export(imports_get_batches) export(imports_get_files_csv) +export(imports_get_files_csv_runs) export(imports_get_files_runs) export(imports_list) export(imports_list_batches) +export(imports_list_dependencies) +export(imports_list_files_csv_runs) +export(imports_list_files_csv_runs_logs) export(imports_list_files_runs) export(imports_list_files_runs_logs) export(imports_list_projects) @@ -308,6 +313,7 @@ export(imports_post_batches) export(imports_post_cancel) export(imports_post_files) export(imports_post_files_csv) +export(imports_post_files_csv_runs) export(imports_post_files_runs) export(imports_post_runs) export(imports_post_syncs) @@ -320,6 +326,7 @@ export(imports_put_shares_groups) export(imports_put_shares_users) export(imports_put_syncs) export(imports_put_syncs_archive) +export(imports_put_transfer) export(jobs_delete_projects) export(jobs_delete_runs) export(jobs_delete_shares_groups) @@ -328,6 +335,7 @@ export(jobs_get) export(jobs_get_runs) export(jobs_list) export(jobs_list_children) +export(jobs_list_dependencies) export(jobs_list_parents) export(jobs_list_projects) export(jobs_list_runs) @@ -341,14 +349,17 @@ export(jobs_put_archive) export(jobs_put_projects) export(jobs_put_shares_groups) export(jobs_put_shares_users) +export(jobs_put_transfer) export(json_values_delete_shares_groups) export(json_values_delete_shares_users) export(json_values_get) +export(json_values_list_dependencies) export(json_values_list_shares) export(json_values_patch) export(json_values_post) export(json_values_put_shares_groups) export(json_values_put_shares_users) +export(json_values_put_transfer) export(match_targets_delete_shares_groups) export(match_targets_delete_shares_users) export(match_targets_get) @@ -407,6 +418,7 @@ export(models_get_builds) export(models_list) export(models_list_builds) 
export(models_list_builds_logs) +export(models_list_dependencies) export(models_list_projects) export(models_list_schedules) export(models_list_shares) @@ -415,6 +427,7 @@ export(models_put_archive) export(models_put_projects) export(models_put_shares_groups) export(models_put_shares_users) +export(models_put_transfer) export(notebooks_delete) export(notebooks_delete_deployments) export(notebooks_delete_projects) @@ -424,6 +437,7 @@ export(notebooks_get) export(notebooks_get_deployments) export(notebooks_get_git_commits) export(notebooks_list) +export(notebooks_list_dependencies) export(notebooks_list_deployments) export(notebooks_list_deployments_logs) export(notebooks_list_git) @@ -432,9 +446,12 @@ export(notebooks_list_projects) export(notebooks_list_shares) export(notebooks_list_update_links) export(notebooks_patch) +export(notebooks_patch_git) export(notebooks_post) export(notebooks_post_clone) export(notebooks_post_deployments) +export(notebooks_post_git_checkout) +export(notebooks_post_git_checkout_latest) export(notebooks_post_git_commits) export(notebooks_put) export(notebooks_put_archive) @@ -442,8 +459,33 @@ export(notebooks_put_git) export(notebooks_put_projects) export(notebooks_put_shares_groups) export(notebooks_put_shares_users) +export(notebooks_put_transfer) export(notifications_list) export(ontology_list) +export(permission_sets_delete_resources) +export(permission_sets_delete_resources_shares_groups) +export(permission_sets_delete_resources_shares_users) +export(permission_sets_delete_shares_groups) +export(permission_sets_delete_shares_users) +export(permission_sets_get) +export(permission_sets_get_resources) +export(permission_sets_list) +export(permission_sets_list_dependencies) +export(permission_sets_list_resources) +export(permission_sets_list_resources_shares) +export(permission_sets_list_shares) +export(permission_sets_list_users_permissions) +export(permission_sets_patch) +export(permission_sets_patch_resources) +export(permission_sets_post) +export(permission_sets_post_resources) +export(permission_sets_put) +export(permission_sets_put_archive) +export(permission_sets_put_resources_shares_groups) +export(permission_sets_put_resources_shares_users) +export(permission_sets_put_shares_groups) +export(permission_sets_put_shares_users) +export(permission_sets_put_transfer) export(predictions_get) export(predictions_list) export(predictions_list_schedules) @@ -453,16 +495,21 @@ export(projects_delete_shares_groups) export(projects_delete_shares_users) export(projects_get) export(projects_list) +export(projects_list_dependencies) export(projects_list_parent_projects) export(projects_list_shares) export(projects_post) +export(projects_post_clone) export(projects_put) export(projects_put_archive) +export(projects_put_auto_share) export(projects_put_parent_projects) export(projects_put_shares_groups) export(projects_put_shares_users) +export(projects_put_transfer) export(publish_html) export(publish_rmd) +export(queries_delete) export(queries_delete_runs) export(queries_get) export(queries_get_runs) @@ -476,10 +523,18 @@ export(query_civis) export(query_civis_file) export(read_civis) export(refresh_table) +export(remote_hosts_delete_shares_groups) +export(remote_hosts_delete_shares_users) +export(remote_hosts_get) export(remote_hosts_list) export(remote_hosts_list_data_sets) +export(remote_hosts_list_shares) +export(remote_hosts_patch) export(remote_hosts_post) export(remote_hosts_post_authenticate) +export(remote_hosts_put) +export(remote_hosts_put_shares_groups) 
+export(remote_hosts_put_shares_users) export(reports_delete_grants) export(reports_delete_projects) export(reports_delete_services_projects) @@ -491,15 +546,20 @@ export(reports_get) export(reports_get_git_commits) export(reports_get_services) export(reports_list) +export(reports_list_dependencies) export(reports_list_git) export(reports_list_git_commits) export(reports_list_projects) +export(reports_list_services_dependencies) export(reports_list_services_projects) export(reports_list_services_shares) export(reports_list_shares) export(reports_patch) +export(reports_patch_git) export(reports_patch_services) export(reports_post) +export(reports_post_git_checkout) +export(reports_post_git_checkout_latest) export(reports_post_git_commits) export(reports_post_grants) export(reports_post_refresh) @@ -507,11 +567,15 @@ export(reports_post_services) export(reports_put_archive) export(reports_put_git) export(reports_put_projects) +export(reports_put_services_archive) export(reports_put_services_projects) export(reports_put_services_shares_groups) export(reports_put_services_shares_users) +export(reports_put_services_transfer) export(reports_put_shares_groups) export(reports_put_shares_users) +export(reports_put_transfer) +export(roles_list) export(run_civis) export(run_template) export(saml_service_providers_delete_shares_groups) @@ -568,18 +632,21 @@ export(scripts_get_sql) export(scripts_get_sql_git_commits) export(scripts_get_sql_runs) export(scripts_list) +export(scripts_list_containers_dependencies) export(scripts_list_containers_projects) export(scripts_list_containers_runs) export(scripts_list_containers_runs_logs) export(scripts_list_containers_runs_outputs) export(scripts_list_containers_shares) export(scripts_list_custom) +export(scripts_list_custom_dependencies) export(scripts_list_custom_projects) export(scripts_list_custom_runs) export(scripts_list_custom_runs_logs) export(scripts_list_custom_runs_outputs) export(scripts_list_custom_shares) export(scripts_list_history) +export(scripts_list_javascript_dependencies) export(scripts_list_javascript_git) export(scripts_list_javascript_git_commits) export(scripts_list_javascript_projects) @@ -587,6 +654,7 @@ export(scripts_list_javascript_runs) export(scripts_list_javascript_runs_logs) export(scripts_list_javascript_runs_outputs) export(scripts_list_javascript_shares) +export(scripts_list_python3_dependencies) export(scripts_list_python3_git) export(scripts_list_python3_git_commits) export(scripts_list_python3_projects) @@ -594,6 +662,7 @@ export(scripts_list_python3_runs) export(scripts_list_python3_runs_logs) export(scripts_list_python3_runs_outputs) export(scripts_list_python3_shares) +export(scripts_list_r_dependencies) export(scripts_list_r_git) export(scripts_list_r_git_commits) export(scripts_list_r_projects) @@ -601,6 +670,7 @@ export(scripts_list_r_runs) export(scripts_list_r_runs_logs) export(scripts_list_r_runs_outputs) export(scripts_list_r_shares) +export(scripts_list_sql_dependencies) export(scripts_list_sql_git) export(scripts_list_sql_git_commits) export(scripts_list_sql_projects) @@ -610,12 +680,21 @@ export(scripts_list_sql_runs_outputs) export(scripts_list_sql_shares) export(scripts_list_types) export(scripts_patch) +export(scripts_patch_container_runs) export(scripts_patch_containers) export(scripts_patch_custom) export(scripts_patch_javascript) +export(scripts_patch_javascript_git) +export(scripts_patch_javascript_runs) export(scripts_patch_python3) +export(scripts_patch_python3_git) 
+export(scripts_patch_python3_runs) export(scripts_patch_r) +export(scripts_patch_r_git) +export(scripts_patch_r_runs) export(scripts_patch_sql) +export(scripts_patch_sql_git) +export(scripts_patch_sql_runs) export(scripts_post) export(scripts_post_cancel) export(scripts_post_containers) @@ -629,22 +708,30 @@ export(scripts_post_custom_runs) export(scripts_post_custom_runs_outputs) export(scripts_post_javascript) export(scripts_post_javascript_clone) +export(scripts_post_javascript_git_checkout) +export(scripts_post_javascript_git_checkout_latest) export(scripts_post_javascript_git_commits) export(scripts_post_javascript_runs) export(scripts_post_javascript_runs_outputs) export(scripts_post_python3) export(scripts_post_python3_clone) +export(scripts_post_python3_git_checkout) +export(scripts_post_python3_git_checkout_latest) export(scripts_post_python3_git_commits) export(scripts_post_python3_runs) export(scripts_post_python3_runs_outputs) export(scripts_post_r) export(scripts_post_r_clone) +export(scripts_post_r_git_checkout) +export(scripts_post_r_git_checkout_latest) export(scripts_post_r_git_commits) export(scripts_post_r_runs) export(scripts_post_r_runs_outputs) export(scripts_post_run) export(scripts_post_sql) export(scripts_post_sql_clone) +export(scripts_post_sql_git_checkout) +export(scripts_post_sql_git_checkout_latest) export(scripts_post_sql_git_commits) export(scripts_post_sql_runs) export(scripts_put_containers) @@ -652,36 +739,43 @@ export(scripts_put_containers_archive) export(scripts_put_containers_projects) export(scripts_put_containers_shares_groups) export(scripts_put_containers_shares_users) +export(scripts_put_containers_transfer) export(scripts_put_custom) export(scripts_put_custom_archive) export(scripts_put_custom_projects) export(scripts_put_custom_shares_groups) export(scripts_put_custom_shares_users) +export(scripts_put_custom_transfer) export(scripts_put_javascript) export(scripts_put_javascript_archive) export(scripts_put_javascript_git) export(scripts_put_javascript_projects) export(scripts_put_javascript_shares_groups) export(scripts_put_javascript_shares_users) +export(scripts_put_javascript_transfer) export(scripts_put_python3) export(scripts_put_python3_archive) export(scripts_put_python3_git) export(scripts_put_python3_projects) export(scripts_put_python3_shares_groups) export(scripts_put_python3_shares_users) +export(scripts_put_python3_transfer) export(scripts_put_r) export(scripts_put_r_archive) export(scripts_put_r_git) export(scripts_put_r_projects) export(scripts_put_r_shares_groups) export(scripts_put_r_shares_users) +export(scripts_put_r_transfer) export(scripts_put_sql) export(scripts_put_sql_archive) export(scripts_put_sql_git) export(scripts_put_sql_projects) export(scripts_put_sql_shares_groups) export(scripts_put_sql_shares_users) +export(scripts_put_sql_transfer) export(search_list) +export(search_list_queries) export(search_list_types) export(services_delete) export(services_delete_deployments) @@ -692,6 +786,7 @@ export(services_delete_tokens) export(services_get) export(services_get_deployments) export(services_list) +export(services_list_dependencies) export(services_list_deployments) export(services_list_deployments_logs) export(services_list_projects) @@ -708,19 +803,27 @@ export(services_put_archive) export(services_put_projects) export(services_put_shares_groups) export(services_put_shares_users) +export(services_put_transfer) export(sql) export(storage_hosts_delete) export(storage_hosts_delete_shares_groups) 
export(storage_hosts_delete_shares_users) export(storage_hosts_get) export(storage_hosts_list) +export(storage_hosts_list_dependencies) export(storage_hosts_list_shares) export(storage_hosts_patch) export(storage_hosts_post) export(storage_hosts_put) export(storage_hosts_put_shares_groups) export(storage_hosts_put_shares_users) +export(storage_hosts_put_transfer) +export(table_tags_delete) +export(table_tags_get) +export(table_tags_list) +export(table_tags_post) export(tables_delete_projects) +export(tables_delete_tags) export(tables_get) export(tables_get_enhancements_cass_ncoa) export(tables_get_enhancements_geocodings) @@ -733,6 +836,7 @@ export(tables_post_enhancements_geocodings) export(tables_post_refresh) export(tables_post_scan) export(tables_put_projects) +export(tables_put_tags) export(templates_delete_reports) export(templates_delete_reports_shares_groups) export(templates_delete_reports_shares_users) @@ -743,8 +847,10 @@ export(templates_delete_scripts_shares_users) export(templates_get_reports) export(templates_get_scripts) export(templates_list_reports) +export(templates_list_reports_dependencies) export(templates_list_reports_shares) export(templates_list_scripts) +export(templates_list_scripts_dependencies) export(templates_list_scripts_projects) export(templates_list_scripts_shares) export(templates_patch_reports) @@ -756,25 +862,34 @@ export(templates_post_scripts_review) export(templates_put_reports) export(templates_put_reports_shares_groups) export(templates_put_reports_shares_users) +export(templates_put_reports_transfer) export(templates_put_scripts) export(templates_put_scripts_projects) export(templates_put_scripts_shares_groups) export(templates_put_scripts_shares_users) +export(templates_put_scripts_transfer) export(transfer_table) +export(users_delete_2fa) export(users_delete_api_keys) export(users_delete_me_favorites) +export(users_delete_me_superadmin) +export(users_delete_sessions) export(users_get) export(users_get_api_keys) +export(users_get_me_themes) export(users_list) export(users_list_api_keys) export(users_list_me) export(users_list_me_favorites) +export(users_list_me_themes) export(users_list_me_ui) export(users_patch) export(users_patch_me) export(users_post) export(users_post_api_keys) export(users_post_me_favorites) +export(users_post_me_superadmin) +export(users_post_unsuspend) export(workflows_delete_projects) export(workflows_delete_shares_groups) export(workflows_delete_shares_users) @@ -783,18 +898,22 @@ export(workflows_get_executions) export(workflows_get_executions_tasks) export(workflows_get_git_commits) export(workflows_list) +export(workflows_list_dependencies) export(workflows_list_executions) export(workflows_list_git) export(workflows_list_git_commits) export(workflows_list_projects) export(workflows_list_shares) export(workflows_patch) +export(workflows_patch_git) export(workflows_post) export(workflows_post_clone) export(workflows_post_executions) export(workflows_post_executions_cancel) export(workflows_post_executions_resume) export(workflows_post_executions_retry) +export(workflows_post_git_checkout) +export(workflows_post_git_checkout_latest) export(workflows_post_git_commits) export(workflows_put) export(workflows_put_archive) @@ -802,6 +921,7 @@ export(workflows_put_git) export(workflows_put_projects) export(workflows_put_shares_groups) export(workflows_put_shares_users) +export(workflows_put_transfer) export(write_civis) export(write_civis_file) export(write_job_output) diff --git a/R/civis_ml.R b/R/civis_ml.R index 
e7b7ae52..0d116b10 100644 --- a/R/civis_ml.R +++ b/R/civis_ml.R @@ -90,24 +90,24 @@ #' column. The \code{"sparse_*"} models include a LASSO regression step #' (using \code{glmnet}) to do feature selection before passing data to the #' final model. In some models, CivisML uses default parameters from those in -#' \href{http://scikit-learn.org/stable/}{Scikit-Learn}, as indicated in the "Altered Defaults" column. +#' \href{https://scikit-learn.org/stable/}{Scikit-Learn}, as indicated in the "Altered Defaults" column. #' All models also have \code{random_state=42}. #' #' Specific workflows can also be called directly using the R workflow functions. #' #' \tabular{rrrrr}{ #' Name \tab R Workflow \tab Model Type \tab Algorithm \tab Altered Defaults \cr -#' \code{sparse_logistic} \tab \code{\link{civis_ml_sparse_logistic}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html}{LogisticRegression} \tab \code{C=499999950, tol=1e-08} \cr -#' \code{gradient_boosting_classifier} \tab \code{\link{civis_ml_gradient_boosting_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html}{GradientBoostingClassifier} \tab \code{n_estimators=500, max_depth=2} \cr -#' \code{random_forest_classifier} \tab \code{\link{civis_ml_random_forest_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html}{RandomForestClassifier} \tab \code{n_estimators=500} \cr -#' \code{extra_trees_classifier} \tab \code{\link{civis_ml_extra_trees_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html}{ExtraTreesClassifier} \tab \code{n_estimators=500} \cr +#' \code{sparse_logistic} \tab \code{\link{civis_ml_sparse_logistic}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html}{LogisticRegression} \tab \code{C=499999950, tol=1e-08} \cr +#' \code{gradient_boosting_classifier} \tab \code{\link{civis_ml_gradient_boosting_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html}{GradientBoostingClassifier} \tab \code{n_estimators=500, max_depth=2} \cr +#' \code{random_forest_classifier} \tab \code{\link{civis_ml_random_forest_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html}{RandomForestClassifier} \tab \code{n_estimators=500} \cr +#' \code{extra_trees_classifier} \tab \code{\link{civis_ml_extra_trees_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html}{ExtraTreesClassifier} \tab \code{n_estimators=500} \cr #' \code{multilayer_perceptron_classifier} \tab \tab classification \tab \href{https://github.com/civisanalytics/muffnn}{muffnn.MLPClassifier} \tab \cr #' \code{stacking_classifier} \tab \tab classification \tab \href{https://github.com/civisanalytics/civisml-extensions}{StackedClassifier}\tab \cr -#' \code{sparse_linear_regressor} \tab \code{\link{civis_ml_sparse_linear_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html}{LinearRegression} \tab \cr -#' \code{sparse_ridge_regressor} \tab 
\code{\link{civis_ml_sparse_ridge_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html}{Ridge} \tab \cr -#' \code{gradient_boosting_regressor} \tab \code{\link{civis_ml_gradient_boosting_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html}{GradientBoostingRegressor} \tab \code{n_estimators=500, max_depth=2} \cr -#' \code{random_forest_regressor} \tab \code{\link{civis_ml_random_forest_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html}{RandomForestRegressor} \tab \code{n_estimators=500} \cr -#' \code{extra_trees_regressor} \tab \code{\link{civis_ml_extra_trees_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html}{ExtraTreesRegressor} \tab \code{n_estimators=500} \cr +#' \code{sparse_linear_regressor} \tab \code{\link{civis_ml_sparse_linear_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html}{LinearRegression} \tab \cr +#' \code{sparse_ridge_regressor} \tab \code{\link{civis_ml_sparse_ridge_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html}{Ridge} \tab \cr +#' \code{gradient_boosting_regressor} \tab \code{\link{civis_ml_gradient_boosting_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html}{GradientBoostingRegressor} \tab \code{n_estimators=500, max_depth=2} \cr +#' \code{random_forest_regressor} \tab \code{\link{civis_ml_random_forest_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html}{RandomForestRegressor} \tab \code{n_estimators=500} \cr +#' \code{extra_trees_regressor} \tab \code{\link{civis_ml_extra_trees_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html}{ExtraTreesRegressor} \tab \code{n_estimators=500} \cr #' \code{multilayer_perceptron_regressor} \tab \tab regression \tab \href{https://github.com/civisanalytics/muffnn}{muffnn.MLPRegressor} \tab \cr #' \code{stacking_regressor} \tab \tab regression \tab \href{https://github.com/civisanalytics/civisml-extensions}{StackedRegressor}\tab \cr #' } @@ -118,9 +118,9 @@ #' \code{"random_forest_classifier"} predefined models together with a #' \code{glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss')}. #' Defaults for the predefined models are documented in \code{?civis_ml}. Each column is first -#' \href{http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html}{standardized}, +#' \href{https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html}{standardized}, #' and then the model predictions are combined using -#' \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html}{LogisticRegressionCV} +#' \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html}{LogisticRegressionCV} #' with \code{penalty='l2'} and \code{tol=1e-08}. 
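# Illustrative sketch (editorial, not part of the package documentation): training a
# stacked CivisML workflow as described above and scoring new data. The table,
# database, and column names below are hypothetical placeholders.
library(civis)
m <- civis_ml(civis_table("schema.train", "my-database"),
              dependent_variable = "target",
              model_type = "stacking_classifier")
scores <- predict(m, civis_table("schema.to_score", "my-database"))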
The \code{"stacking_regressor"} works similarly, stacking together #' the \code{"gradient_boosting_regressor"} and \code{"random_forest_regressor"} models and a #' \code{glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')}, combining them using diff --git a/R/generated_client.R b/R/generated_client.R index 91196a27..6d29987b 100644 --- a/R/generated_client.R +++ b/R/generated_client.R @@ -233,6 +233,10 @@ admin_patch_themes <- function(id, name = NULL, organization_ids = NULL, setting #' \item{csMetadata}{string, Additional metadata about the organization in JSON format.} #' \item{removeFooterInEmails}{boolean, If true, emails sent by platform will not include Civis text.} #' \item{salesforceAccountId}{string, The SalesForce Account ID for this organization.} +#' \item{tableauSiteId}{string, The Tableau Site ID for this organization.} +#' \item{fedrampEnabled}{boolean, Flag denoting whether this organization is FedRAMP compliant.} +#' \item{createdById}{integer, The ID of the user who created this organization} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this organization} #' \item{advancedSettings}{list, A list containing the following elements: #' \itemize{ #' \item dedicatedDjPoolEnabled boolean, If true, the Organization has a dedicated delayed jobs pool. Defaults to false. @@ -427,8 +431,72 @@ aliases_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +aliases_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/aliases/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
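# Illustrative sketch (editorial, not generated client code): inspecting an alias's
# dependent objects and transferring ownership with the endpoints added in this
# patch. The alias and user ids are hypothetical placeholders.
library(civis)
deps <- aliases_list_dependencies(id = 42, user_id = 101)
aliases_put_transfer(id = 42, user_id = 101,
                     include_dependencies = TRUE, send_email = FALSE)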
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +aliases_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/aliases/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List Aliases -#' @param object_type string optional. Filter results by object type. Pass multiple object types with a comma-separatedlist. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. +#' @param object_type string optional. Filter results by object type. Pass multiple object types with a comma-separatedlist. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. #' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. #' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id, object_type. @@ -437,7 +505,7 @@ aliases_delete_shares_groups <- function(id, group_id) { #' @return An array containing the following fields: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. 
Defaults to object name if not provided.} @@ -461,14 +529,14 @@ aliases_list <- function(object_type = NULL, limit = NULL, page_num = NULL, orde #' Create an Alias #' @param object_id integer required. The id of the object -#' @param object_type string required. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. +#' @param object_type string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. #' @param alias string required. The alias of the object #' @param display_name string optional. The display name of the Alias object. Defaults to object name if not provided. #' #' @return A list containing the following elements: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} @@ -496,7 +564,7 @@ aliases_post <- function(object_id, object_type, alias, display_name = NULL) { #' @return A list containing the following elements: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} @@ -521,14 +589,14 @@ aliases_get <- function(id) { #' Replace all attributes of this Alias #' @param id integer required. The id of the Alias object. #' @param object_id integer required. The id of the object -#' @param object_type string required. The type of the object. 
Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. +#' @param object_type string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. #' @param alias string required. The alias of the object #' @param display_name string optional. The display name of the Alias object. Defaults to object name if not provided. #' #' @return A list containing the following elements: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} @@ -553,14 +621,14 @@ aliases_put <- function(id, object_id, object_type, alias, display_name = NULL) #' Update some attributes of this Alias #' @param id integer required. The id of the Alias object. #' @param object_id integer optional. The id of the object -#' @param object_type string optional. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. +#' @param object_type string optional. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. #' @param alias string optional. The alias of the object #' @param display_name string optional. The display name of the Alias object. Defaults to object name if not provided. #' #' @return A list containing the following elements: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. 
Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} @@ -605,13 +673,13 @@ aliases_delete <- function(id) { #' Get details about an alias within an FCO type -#' @param object_type string required. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. +#' @param object_type string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report. #' @param alias string required. The alias of the object #' #' @return A list containing the following elements: #' \item{id}{integer, The id of the Alias object.} #' \item{objectId}{integer, The id of the object} -#' \item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +#' \item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} #' \item{alias}{string, The alias of the object} #' \item{userId}{integer, The id of the user who created the alias} #' \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} @@ -664,20 +732,39 @@ announcements_list <- function(limit = NULL, page_num = NULL, order = NULL, orde } -#' List apps +#' List Kubernetes Clusters +#' @param organization_id integer optional. The ID of this cluster's organization. Cannot be used along with the organization slug. +#' @param organization_slug string optional. The slug of this cluster's organization. Cannot be used along with the organization ID. +#' @param raw_cluster_slug string optional. The slug of this cluster's raw configuration. +#' @param exclude_inactive_orgs boolean optional. When true, excludes KubeClusters associated with inactive orgs. Defaults to false. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to organization_id. Must be one of: organization_id, created_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. 
#' #' @return An array containing the following fields: -#' \item{slug}{string, The slug for the application.} -#' \item{id}{integer, The unique id of the application.} -#' \item{instanceName}{string, A word that describes an instance of this app.} -#' \item{name}{string, The name of the application.} +#' \item{id}{integer, The ID of this cluster.} +#' \item{organizationId}{string, The id of this cluster's organization.} +#' \item{organizationName}{string, The name of this cluster's organization.} +#' \item{organizationSlug}{string, The slug of this cluster's organization.} +#' \item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +#' \item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} +#' \item{clusterPartitions}{array, An array containing the following fields: +#' \itemize{ +#' \item clusterPartitionId integer, The ID of this cluster partition. +#' \item name string, The name of the cluster partition. +#' \item labels array, Labels associated with this partition. +#' \item instanceConfigs array, The instances configured for this cluster partition. +#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. +#' }} +#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} #' @export -apps_list <- function() { +clusters_list_kubernetes <- function(organization_id = NULL, organization_slug = NULL, raw_cluster_slug = NULL, exclude_inactive_orgs = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/" + path <- "/clusters/kubernetes" path_params <- list() - query_params <- list() + query_params <- list(organization_id = organization_id, organization_slug = organization_slug, raw_cluster_slug = raw_cluster_slug, exclude_inactive_orgs = exclude_inactive_orgs, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -689,30 +776,75 @@ apps_list <- function() { } -#' List details of a Decision Application -#' @param slug string required. The slug for the application. +#' Create a Kubernetes Cluster +#' @param organization_id string optional. The id of this cluster's organization. +#' @param organization_slug string optional. The slug of this cluster's organization. +#' @param raw_cluster_slug string optional. The slug of this cluster's raw configuration. +#' @param is_nat_enabled boolean optional. Whether this cluster needs a NAT gateway or not. 
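# Illustrative sketch (editorial): listing Kubernetes clusters with the new endpoint
# defined above; the organization id is a hypothetical placeholder.
library(civis)
clusters <- clusters_list_kubernetes(organization_id = 123,
                                     limit = 50, order = "created_at")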
#' #' @return A list containing the following elements: -#' \item{slug}{string, The slug for the application.} -#' \item{id}{integer, The unique id of the application.} -#' \item{instanceName}{string, A word that describes an instance of this app.} -#' \item{name}{string, The name of the application.} -#' \item{currentRelease}{list, A list containing the following elements: +#' \item{id}{integer, The ID of this cluster.} +#' \item{organizationId}{string, The id of this cluster's organization.} +#' \item{organizationName}{string, The name of this cluster's organization.} +#' \item{organizationSlug}{string, The slug of this cluster's organization.} +#' \item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +#' \item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} +#' \item{clusterPartitions}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The unique id of the release. -#' \item appId integer, The id of the app the release belongs to. -#' \item reportTemplateId integer, ID of the report template for this release. -#' \item resources object, A hash of resources associated with this release. -#' \item archived string, The archival status of the requested item(s). +#' \item clusterPartitionId integer, The ID of this cluster partition. +#' \item name string, The name of the cluster partition. +#' \item labels array, Labels associated with this partition. +#' \item instanceConfigs array, The instances configured for this cluster partition. +#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. #' }} -#' \item{features}{list, App features.} +#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} +#' \item{hours}{number, The number of hours used this month for this cluster.} #' @export -apps_get <- function(slug) { +clusters_post_kubernetes <- function(organization_id = NULL, organization_slug = NULL, raw_cluster_slug = NULL, is_nat_enabled = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}" - path_params <- list(slug = slug) + path <- "/clusters/kubernetes" + path_params <- list() query_params <- list() + body_params <- list(organizationId = organization_id, organizationSlug = organization_slug, rawClusterSlug = raw_cluster_slug, isNatEnabled = is_nat_enabled) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Describe a Kubernetes Cluster +#' @param id integer required. +#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this cluster.} +#' \item{organizationId}{string, The id of this cluster's organization.} +#' \item{organizationName}{string, The name of this cluster's organization.} +#' \item{organizationSlug}{string, The slug of this cluster's organization.} +#' \item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +#' \item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} +#' \item{clusterPartitions}{array, An array containing the following fields: +#' \itemize{ +#' \item clusterPartitionId integer, The ID of this cluster partition. 
+#' \item name string, The name of the cluster partition. +#' \item labels array, Labels associated with this partition. +#' \item instanceConfigs array, The instances configured for this cluster partition. +#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. +#' }} +#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} +#' \item{hours}{number, The number of hours used this month for this cluster.} +#' @export +clusters_get_kubernetes <- function(id, include_usage_stats = NULL) { + + args <- as.list(match.call())[-1] + path <- "/clusters/kubernetes/{id}" + path_params <- list(id = id) + query_params <- list(include_usage_stats = include_usage_stats) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -724,59 +856,66 @@ apps_get <- function(slug) { } -#' Update the Decision Application features for a given organization -#' @param slug string required. The slug for the application. -#' @param org string required. Organization. -#' @param features list required. App features. +#' Update a Kubernetes Cluster +#' @param id integer required. The ID of this cluster. +#' @param raw_cluster_slug string optional. The slug of this cluster's raw configuration. +#' @param is_nat_enabled boolean optional. Whether this cluster needs a NAT gateway or not. #' #' @return A list containing the following elements: -#' \item{slug}{string, The slug for the application.} -#' \item{id}{integer, The unique id of the application.} -#' \item{instanceName}{string, A word that describes an instance of this app.} -#' \item{name}{string, The name of the application.} -#' \item{currentRelease}{list, A list containing the following elements: +#' \item{id}{integer, The ID of this cluster.} +#' \item{organizationId}{string, The id of this cluster's organization.} +#' \item{organizationName}{string, The name of this cluster's organization.} +#' \item{organizationSlug}{string, The slug of this cluster's organization.} +#' \item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +#' \item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} +#' \item{clusterPartitions}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The unique id of the release. -#' \item appId integer, The id of the app the release belongs to. -#' \item reportTemplateId integer, ID of the report template for this release. -#' \item resources object, A hash of resources associated with this release. -#' \item archived string, The archival status of the requested item(s). +#' \item clusterPartitionId integer, The ID of this cluster partition. +#' \item name string, The name of the cluster partition. +#' \item labels array, Labels associated with this partition. +#' \item instanceConfigs array, The instances configured for this cluster partition. +#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. 
#' }} -#' \item{features}{list, App features.} +#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} +#' \item{hours}{number, The number of hours used this month for this cluster.} #' @export -apps_put_features <- function(slug, org, features) { +clusters_patch_kubernetes <- function(id, raw_cluster_slug = NULL, is_nat_enabled = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/features/{org}" - path_params <- list(slug = slug, org = org) + path <- "/clusters/kubernetes/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(features = features) + body_params <- list(rawClusterSlug = raw_cluster_slug, isNatEnabled = is_nat_enabled) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' List the instances of a Decision Application -#' @param slug string required. The slug for the application. -#' @param archived string optional. The archival status of the requested item(s). -#' @param app_release_id integer optional. If supplied, return only instances matching this release. +#' List the deployments associated with a Kubernetes Cluster +#' @param id integer required. The id of the cluster. +#' @param base_type string optional. If specified, return deployments of these base types. It accepts a comma-separated list, possible values are 'Notebook', 'Service', 'Run'. +#' @param state string optional. If specified, return deployments in these states. It accepts a comma-separated list, possible values are pending, running, terminated, sleeping #' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id, created_at. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. 
#' #' @return An array containing the following fields: -#' \item{id}{integer, The unique id of the instance.} -#' \item{name}{string, The name of the instance.} -#' \item{appReleaseId}{integer, The id of the app release the instance belongs to.} -#' \item{reportId}{integer, The id of the report the instance belongs to.} -#' \item{createdAt}{string, The time the instance was created at.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The id of this deployment.} +#' \item{name}{string, The name of the deployment.} +#' \item{baseId}{integer, The id of the base object associated with the deployment.} +#' \item{baseType}{string, The base type of this deployment.} +#' \item{state}{string, The state of the deployment.} +#' \item{cpu}{integer, The CPU in millicores required by the deployment.} +#' \item{memory}{integer, The memory in MB required by the deployment.} +#' \item{diskSpace}{integer, The disk space in GB required by the deployment.} +#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -784,15 +923,17 @@ apps_put_features <- function(slug, org, features) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export -apps_list_instances <- function(slug, archived = NULL, app_release_id = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +clusters_list_kubernetes_deployments <- function(id, base_type = NULL, state = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances" - path_params <- list(slug = slug) - query_params <- list(archived = archived, app_release_id = app_release_id, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/clusters/kubernetes/{id}/deployments" + path_params <- list(id = id) + query_params <- list(base_type = base_type, state = state, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -804,67 +945,60 @@ apps_list_instances <- function(slug, archived = NULL, app_release_id = NULL, li } -#' Create a new instance of an application of the given slug -#' @param slug string required. The slug for the application. -#' @param name string optional. The name of the instance. +#' Get stats about deployments associated with a Kubernetes Cluster +#' @param id integer required. The ID of this cluster. 
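# Illustrative sketch (editorial): listing running Service deployments on a cluster
# via the endpoint defined above; the cluster id is a hypothetical placeholder.
library(civis)
deploys <- clusters_list_kubernetes_deployments(id = 7, base_type = "Service",
                                                state = "running")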
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the instance.} -#' \item{name}{string, The name of the instance.} -#' \item{appReleaseId}{integer, The id of the app release the instance belongs to.} -#' \item{reportId}{integer, The id of the report the instance belongs to.} -#' \item{createdAt}{string, The time the instance was created at.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -#' \item{authCodeUrl}{string, } -#' \item{apiKey}{string, A Civis API key that can be used by this app instance.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return An array containing the following fields: +#' \item{baseType}{string, The base type of this deployment} +#' \item{state}{string, State of the deployment} +#' \item{count}{integer, Number of deployments of base type and state} +#' \item{totalCpu}{integer, Total amount of CPU in millicores for deployments of base type and state} +#' \item{totalMemory}{integer, Total amount of Memory in megabytes for deployments of base type and state} #' @export -apps_post_instances <- function(slug, name = NULL) { +clusters_list_kubernetes_deployment_stats <- function(id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances" - path_params <- list(slug = slug) + path <- "/clusters/kubernetes/{id}/deployment_stats" + path_params <- list(id = id) query_params <- list() - body_params <- list(name = name) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List the releases of a particular Decision Application -#' @param slug string required. The slug for the application. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' List Cluster Partitions for given cluster +#' @param id integer required. +#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. 
#' #' @return An array containing the following fields: -#' \item{id}{integer, The unique id of the release.} -#' \item{appId}{integer, The id of the app the release belongs to.} -#' \item{reportTemplateId}{integer, ID of the report template for this release.} -#' \item{resources}{list, A hash of resources associated with this release.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} +#' \item{name}{string, The name of the cluster partition.} +#' \item{labels}{array, Labels associated with this partition.} +#' \item{instanceConfigs}{array, An array containing the following fields: +#' \itemize{ +#' \item instanceConfigId integer, The ID of this InstanceConfig. +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. +#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. +#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. +#' \item usageStats object, +#' }} +#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} #' @export -apps_list_releases <- function(slug, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +clusters_list_kubernetes_partitions <- function(id, include_usage_stats = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases" - path_params <- list(slug = slug) - query_params <- list(archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/clusters/kubernetes/{id}/partitions" + path_params <- list(id = id) + query_params <- list(include_usage_stats = include_usage_stats) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -876,25 +1010,41 @@ apps_list_releases <- function(slug, archived = NULL, limit = NULL, page_num = N } -#' Create a new Decision Application release -#' @param slug string required. The slug for the application. -#' @param report_template_id integer required. ID of the report template for this release. -#' @param resources list required. A hash of resources associated with this release. +#' Create a Cluster Partition for given cluster +#' @param id integer required. The ID of the cluster which this partition belongs to. +#' @param instance_configs array required. An array containing the following fields: +#' \itemize{ +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' } +#' @param name string required. The name of the cluster partition. +#' @param labels array required. Labels associated with this partition. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the release.} -#' \item{appId}{integer, The id of the app the release belongs to.} -#' \item{reportTemplateId}{integer, ID of the report template for this release.} -#' \item{resources}{list, A hash of resources associated with this release.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} +#' \item{name}{string, The name of the cluster partition.} +#' \item{labels}{array, Labels associated with this partition.} +#' \item{instanceConfigs}{array, An array containing the following fields: +#' \itemize{ +#' \item instanceConfigId integer, The ID of this InstanceConfig. +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. +#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. +#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. +#' \item usageStats object, +#' }} +#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} #' @export -apps_post_releases <- function(slug, report_template_id, resources) { +clusters_post_kubernetes_partitions <- function(id, instance_configs, name, labels) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases" - path_params <- list(slug = slug) + path <- "/clusters/kubernetes/{id}/partitions" + path_params <- list(id = id) query_params <- list() - body_params <- list(reportTemplateId = report_template_id, resources = resources) + body_params <- list(instanceConfigs = instance_configs, name = name, labels = labels) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -905,93 +1055,103 @@ apps_post_releases <- function(slug, report_template_id, resources) { } -#' Return a given app release -#' @param id integer required. The unique id of the release. -#' @param slug string required. The slug for the application. +#' Update a Cluster Partition +#' @param id integer required. The ID of the cluster which this partition belongs to. +#' @param cluster_partition_id integer required. The ID of this cluster partition. +#' @param instance_configs array optional. An array containing the following fields: +#' \itemize{ +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' } +#' @param name string optional. The name of the cluster partition. +#' @param labels array optional. Labels associated with this partition. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the release.} -#' \item{appId}{integer, The id of the app the release belongs to.} -#' \item{reportTemplateId}{integer, ID of the report template for this release.} -#' \item{resources}{list, A hash of resources associated with this release.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} +#' \item{name}{string, The name of the cluster partition.} +#' \item{labels}{array, Labels associated with this partition.} +#' \item{instanceConfigs}{array, An array containing the following fields: +#' \itemize{ +#' \item instanceConfigId integer, The ID of this InstanceConfig. +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. +#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. +#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. +#' \item usageStats object, +#' }} +#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} #' @export -apps_get_releases <- function(id, slug) { +clusters_patch_kubernetes_partitions <- function(id, cluster_partition_id, instance_configs = NULL, name = NULL, labels = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}" - path_params <- list(id = id, slug = slug) + path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" + path_params <- list(id = id, cluster_partition_id = cluster_partition_id) query_params <- list() - body_params <- list() + body_params <- list(instanceConfigs = instance_configs, name = name, labels = labels) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Update an existing Decision Application release -#' @param slug string required. The slug for the application. -#' @param id integer required. The unique id of the release. -#' @param report_template_id integer optional. ID of the report template for this release. -#' @param resources list optional. A hash of resources associated with this release. +#' Delete a Cluster Partition +#' @param id integer required. The ID of the cluster which this partition belongs to. +#' @param cluster_partition_id integer required. The ID of this cluster partition. 
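# Illustrative sketch (editorial): updating a cluster partition's instance
# configuration with the endpoint defined above. The ids and the exact shape of the
# instance_configs list are assumptions based on the parameter docs in this patch.
library(civis)
clusters_patch_kubernetes_partitions(
  id = 7, cluster_partition_id = 3,
  instance_configs = list(list(instanceType = "m4.xlarge",
                               minInstances = 1, maxInstances = 5)),
  name = "default-partition"
)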
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the release.} -#' \item{appId}{integer, The id of the app the release belongs to.} -#' \item{reportTemplateId}{integer, ID of the report template for this release.} -#' \item{resources}{list, A hash of resources associated with this release.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return An empty HTTP response #' @export -apps_patch_releases <- function(slug, id, report_template_id = NULL, resources = NULL) { +clusters_delete_kubernetes_partitions <- function(id, cluster_partition_id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}" - path_params <- list(slug = slug, id = id) + path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" + path_params <- list(id = id, cluster_partition_id = cluster_partition_id) query_params <- list() - body_params <- list(reportTemplateId = report_template_id, resources = resources) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Return a given app instance -#' @param id integer required. The unique id of the instance. -#' @param slug string required. The slug for the application. +#' Describe a Cluster Partition +#' @param id integer required. The ID of the cluster which this partition belongs to. +#' @param cluster_partition_id integer required. The ID of this cluster partition. +#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. #' #' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the instance.} -#' \item{name}{string, The name of the instance.} -#' \item{appReleaseId}{integer, The id of the app release the instance belongs to.} -#' \item{reportId}{integer, The id of the report the instance belongs to.} -#' \item{createdAt}{string, The time the instance was created at.} -#' \item{user}{list, A list containing the following elements: +#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} +#' \item{name}{string, The name of the cluster partition.} +#' \item{labels}{array, Labels associated with this partition.} +#' \item{instanceConfigs}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item instanceConfigId integer, The ID of this InstanceConfig. +#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. +#' \item minInstances integer, The minimum number of instances of that type in this cluster. +#' \item maxInstances integer, The maximum number of instances of that type in this cluster. +#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. +#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. 
+#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. +#' \item usageStats object, #' }} -#' \item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -#' \item{authCodeUrl}{string, } -#' \item{apiKey}{string, A Civis API key that can be used by this app instance.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} #' @export -apps_get_instances <- function(id, slug) { +clusters_get_kubernetes_partitions <- function(id, cluster_partition_id, include_usage_stats = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}" - path_params <- list(id = id, slug = slug) - query_params <- list() + path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" + path_params <- list(id = id, cluster_partition_id = cluster_partition_id) + query_params <- list(include_usage_stats = include_usage_stats) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1003,76 +1163,80 @@ apps_get_instances <- function(id, slug) { } -#' Update a given app instance -#' @param id integer required. The unique id of the instance. -#' @param slug string required. The slug for the application. -#' @param name string optional. The name of the instance. +#' Describe an Instance Config +#' @param instance_config_id integer required. The ID of this instance config. +#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. #' #' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the instance.} -#' \item{name}{string, The name of the instance.} -#' \item{appReleaseId}{integer, The id of the app release the instance belongs to.} -#' \item{reportId}{integer, The id of the report the instance belongs to.} -#' \item{createdAt}{string, The time the instance was created at.} -#' \item{user}{list, A list containing the following elements: +#' \item{instanceConfigId}{integer, The ID of this InstanceConfig.} +#' \item{instanceType}{string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge.} +#' \item{minInstances}{integer, The minimum number of instances of that type in this cluster.} +#' \item{maxInstances}{integer, The maximum number of instances of that type in this cluster.} +#' \item{instanceMaxMemory}{integer, The amount of memory (RAM) available to a single instance of that type in megabytes.} +#' \item{instanceMaxCpu}{integer, The number of processor shares available to a single instance of that type in millicores.} +#' \item{instanceMaxDisk}{integer, The amount of disk available to a single instance of that type in gigabytes.} +#' \item{usageStats}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item pendingMemoryRequested integer, The sum of memory requests (in MB) for pending deployments in this instance config. 
+#' \item pendingCpuRequested integer, The sum of cpu requests (in millicores) for pending deployments in this instance config. +#' \item runningMemoryRequested integer, The sum of memory requests (in MB) for running deployments in this instance config. +#' \item runningCpuRequested integer, The sum of cpu requests (in millicores) for running deployments in this instance config. +#' \item pendingDeployments integer, The number of pending deployments in this instance config. +#' \item runningDeployments integer, The number of running deployments in this instance config. #' }} -#' \item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -#' \item{authCodeUrl}{string, } -#' \item{apiKey}{string, A Civis API key that can be used by this app instance.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{clusterPartitionId}{integer, The ID of this InstanceConfig's cluster partition} +#' \item{clusterPartitionName}{string, The name of this InstanceConfig's cluster partition} #' @export -apps_patch_instances <- function(id, slug, name = NULL) { +clusters_get_kubernetes_instance_configs <- function(instance_config_id, include_usage_stats = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}" - path_params <- list(id = id, slug = slug) - query_params <- list() - body_params <- list(name = name) + path <- "/clusters/kubernetes/instance_configs/{instance_config_id}" + path_params <- list(instance_config_id = instance_config_id) + query_params <- list(include_usage_stats = include_usage_stats) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param slug string required. The slug for the application. -#' @param id integer required. The ID of the resource that is shared. +#' List active workloads in an Instance Config +#' @param id integer required. The id of the instance config. +#' @param state string optional. If specified, return workloads in these states. It accepts a comma-separated list, possible values are pending, running #' #' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{id}{integer, The id of this deployment.} +#' \item{baseType}{string, The base type of this deployment.} +#' \item{baseId}{integer, The id of the base object associated with this deployment.} +#' \item{baseObjectName}{string, The name of the base object associated with this deployment. Null if you do not have permission to read the object.} +#' \item{jobType}{string, If the base object is a job run you have permission to read, the type of the job. 
One of "python_script", "r_script", "container_script", or "custom_script".} +#' \item{jobId}{integer, If the base object is a job run you have permission to read, the id of the job.} +#' \item{jobCancelRequestedAt}{string, If the base object is a job run you have permission to read, and it was requested to be cancelled, the timestamp of that request.} +#' \item{state}{string, The state of this deployment.} +#' \item{cpu}{integer, The CPU in millicores requested by this deployment.} +#' \item{memory}{integer, The memory in MB requested by this deployment.} +#' \item{diskSpace}{integer, The disk space in GB requested by this deployment.} +#' \item{user}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{createdAt}{string, The timestamp of when the deployment began.} +#' \item{cancellable}{boolean, True if you have permission to cancel this deployment.} #' @export -apps_list_releases_shares <- function(slug, id) { +clusters_list_kubernetes_instance_configs_active_workloads <- function(id, state = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/shares" - path_params <- list(slug = slug, id = id) - query_params <- list() + path <- "/clusters/kubernetes/instance_configs/{id}/active_workloads" + path_params <- list(id = id) + query_params <- list(state = state) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1084,161 +1248,229 @@ apps_list_releases_shares <- function(slug, id) { } -#' Set the permissions users have on this object -#' @param slug string required. The slug for the application. -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get statistics about the current users of an Instance Config +#' @param instance_config_id integer required. The ID of this instance config. +#' @param order string optional. The field on which to order the result set. Defaults to running_deployments. Must be one of pending_memory_requested, pending_cpu_requested, running_memory_requested, running_cpu_requested, pending_deployments, running_deployments. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending). Defaults to desc. 
#' -#' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @return An array containing the following fields: +#' \item{userId}{string, The owning user's ID} +#' \item{userName}{string, The owning user's name} +#' \item{pendingDeployments}{integer, The number of deployments belonging to the owning user in "pending" state} +#' \item{pendingMemoryRequested}{integer, The sum of memory requests (in MB) for deployments belonging to the owning user in "pending" state} +#' \item{pendingCpuRequested}{integer, The sum of CPU requests (in millicores) for deployments belonging to the owning user in "pending" state} +#' \item{runningDeployments}{integer, The number of deployments belonging to the owning user in "running" state} +#' \item{runningMemoryRequested}{integer, The sum of memory requests (in MB) for deployments belonging to the owning user in "running" state} +#' \item{runningCpuRequested}{integer, The sum of CPU requests (in millicores) for deployments belonging to the owning user in "running" state} #' @export -apps_put_releases_shares_users <- function(slug, id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +clusters_list_kubernetes_instance_configs_user_statistics <- function(instance_config_id, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/shares/users" - path_params <- list(slug = slug, id = id) - query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path <- "/clusters/kubernetes/instance_configs/{instance_config_id}/user_statistics" + path_params <- list(instance_config_id = instance_config_id) + query_params <- list(order = order, order_dir = order_dir) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a user has on this object -#' @param slug string required. The slug for the application. -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Get graphs of historical resource usage in an Instance Config +#' @param instance_config_id integer required. The ID of this instance config. +#' @param timeframe string optional. The span of time that the graphs cover. Must be one of 1_day, 1_week. 
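A minimal usage sketch for the user-statistics and historical-graphs endpoints documented here, assuming a placeholder instance config ID; the order and timeframe values are taken from the parameter descriptions above.

# Rank users by running memory requests, then fetch one week of usage graphs.
stats <- clusters_list_kubernetes_instance_configs_user_statistics(
  instance_config_id = 789,             # placeholder ID
  order = "running_memory_requested",
  order_dir = "desc"
)
graphs <- clusters_list_kubernetes_instance_configs_historical_graphs(
  instance_config_id = 789,
  timeframe = "1_week"
)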
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{cpuGraphUrl}{string, URL for the graph of historical CPU usage in this instance config.} +#' \item{memGraphUrl}{string, URL for the graph of historical memory usage in this instance config.} #' @export -apps_delete_releases_shares_users <- function(slug, id, user_id) { +clusters_list_kubernetes_instance_configs_historical_graphs <- function(instance_config_id, timeframe = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/shares/users/{user_id}" - path_params <- list(slug = slug, id = id, user_id = user_id) - query_params <- list() + path <- "/clusters/kubernetes/instance_configs/{instance_config_id}/historical_graphs" + path_params <- list(instance_config_id = instance_config_id) + query_params <- list(timeframe = timeframe) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param slug string required. The slug for the application. -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get list of Credential Types #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{types}{array, list of acceptable credential types} #' @export -apps_put_releases_shares_groups <- function(slug, id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +credentials_list_types <- function() { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/shares/groups" - path_params <- list(slug = slug, id = id) + path <- "/credentials/types" + path_params <- list() query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param slug string required. The slug for the application. -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' List credentials +#' @param type string optional. The type (or types) of credentials to return. One or more of: Amazon Web Services S3, Bitbucket, CASS/NCOA PAF, Certificate, Civis Platform, Custom, Database, Google, Github, Salesforce User, Salesforce Client, and TableauUser. Specify multiple values as a comma-separated list (e.g., "A,B"). +#' @param remote_host_id integer optional. The ID of the remote host associated with the credentials to return. +#' @param default boolean optional. If true, will return a list with a single credential which is the current user's default credential. +#' @param system_credentials boolean optional. If true, will only return system credentials. System credentials can only be created and viewed by Civis Admins. +#' @param users string optional. A comma-separated list of user ids. If specified, returns set of credentials owned by the users that requesting user has at least read access on. +#' @param name string optional. If specified, will be used to filter the credentials returned. Will search across name and will return any full name containing the search string. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 1000. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, created_at, name. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. 
+#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} #' @export -apps_delete_releases_shares_groups <- function(slug, id, group_id) { +credentials_list <- function(type = NULL, remote_host_id = NULL, default = NULL, system_credentials = NULL, users = NULL, name = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/shares/groups/{group_id}" - path_params <- list(slug = slug, id = id, group_id = group_id) - query_params <- list() + path <- "/credentials/" + path_params <- list() + query_params <- list(type = type, remote_host_id = remote_host_id, default = default, system_credentials = system_credentials, users = users, name = name, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param slug string required. The slug for the application. -#' @param status boolean required. The desired archived status of the object. +#' Create a credential +#' @param type string required. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"] +#' @param username string required. The username for the credential. +#' @param password string required. The password for the credential. +#' @param name string optional. The name identifying the credential. +#' @param description string optional. A long description of the credential. +#' @param remote_host_id integer optional. The ID of the remote host associated with the credential. +#' @param user_id integer optional. The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User. +#' @param state string optional. The U.S. state for the credential. Only for VAN credentials. +#' @param system_credential boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user. +#' @param default boolean optional. Whether or not the credential is a default. Only for Database credentials. 
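To make the credential endpoints above concrete, a hedged example of listing and then creating a credential; the filter values, names, and the DB_PASSWORD environment variable are illustrative assumptions.

# List Database credentials, newest first.
creds <- credentials_list(type = "Database", order = "created_at", order_dir = "desc")

# Create a new Database credential without hard-coding the secret.
new_cred <- credentials_post(
  type = "Database",
  username = "analyst",
  password = Sys.getenv("DB_PASSWORD"),  # assumed environment variable
  name = "warehouse-login"
)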
#' #' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the release.} -#' \item{appId}{integer, The id of the app the release belongs to.} -#' \item{reportTemplateId}{integer, ID of the report template for this release.} -#' \item{resources}{list, A hash of resources associated with this release.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' @export -apps_put_releases_archive <- function(id, slug, status) { - - args <- as.list(match.call())[-1] - path <- "/apps/{slug}/releases/{id}/archive" - path_params <- list(id = id, slug = slug) - query_params <- list() - body_params <- list(status = status) +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} +#' @export +credentials_post <- function(type, username, password, name = NULL, description = NULL, remote_host_id = NULL, user_id = NULL, state = NULL, system_credential = NULL, default = NULL) { + + args <- as.list(match.call())[-1] + path <- "/credentials/" + path_params <- list() + query_params <- list() + body_params <- list(type = type, username = username, password = password, name = name, description = description, remoteHostId = remote_host_id, userId = user_id, state = state, systemCredential = system_credential, default = default) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update an existing credential +#' @param id integer required. The ID of the credential. +#' @param type string required. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"] +#' @param username string required. The username for the credential. +#' @param password string required. The password for the credential. +#' @param name string optional. The name identifying the credential. +#' @param description string optional. A long description of the credential. +#' @param remote_host_id integer optional. The ID of the remote host associated with the credential. 
+#' @param user_id integer optional. The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User. +#' @param state string optional. The U.S. state for the credential. Only for VAN credentials. +#' @param system_credential boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user. +#' @param default boolean optional. Whether or not the credential is a default. Only for Database credentials. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} +#' @export +credentials_put <- function(id, type, username, password, name = NULL, description = NULL, remote_host_id = NULL, user_id = NULL, state = NULL, system_credential = NULL, default = NULL) { + + args <- as.list(match.call())[-1] + path <- "/credentials/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(type = type, username = username, password = password, name = name, description = description, remoteHostId = remote_host_id, userId = user_id, state = state, systemCredential = system_credential, default = default) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -1249,8 +1481,194 @@ apps_put_releases_archive <- function(id, slug, status) { } +#' Update some attributes of a credential +#' @param id integer required. The ID of the credential. +#' @param name string optional. The name identifying the credential. +#' @param type string optional. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"] +#' @param description string optional. A long description of the credential. +#' @param username string optional. The username for the credential. +#' @param password string optional. The password for the credential. +#' @param remote_host_id integer optional. The ID of the remote host associated with the credential. +#' @param user_id integer optional. 
The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User. +#' @param state string optional. The U.S. state for the credential. Only for VAN credentials. +#' @param system_credential boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user. +#' @param default boolean optional. Whether or not the credential is a default. Only for Database credentials. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} +#' @export +credentials_patch <- function(id, name = NULL, type = NULL, description = NULL, username = NULL, password = NULL, remote_host_id = NULL, user_id = NULL, state = NULL, system_credential = NULL, default = NULL) { + + args <- as.list(match.call())[-1] + path <- "/credentials/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(name = name, type = type, description = description, username = username, password = password, remoteHostId = remote_host_id, userId = user_id, state = state, systemCredential = system_credential, default = default) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a credential +#' @param id integer required. The ID of the credential. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. 
+#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} +#' @export +credentials_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/credentials/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Delete a credential +#' @param id integer required. The ID of the credential. +#' +#' @return An empty HTTP response +#' @export +credentials_delete <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/credentials/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Authenticate against a remote host +#' @param url string required. The URL to your host. +#' @param remote_host_type string required. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce +#' @param username string required. The username for the credential. +#' @param password string required. The password for the credential. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the credential.} +#' \item{name}{string, The name identifying the credential} +#' \item{type}{string, The credential's type.} +#' \item{username}{string, The username for the credential.} +#' \item{description}{string, A long description of the credential.} +#' \item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} +#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +#' \item{createdAt}{string, The creation time for this credential.} +#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{default}{boolean, Whether or not the credential is a default. 
Only for Database credentials.} +#' @export +credentials_post_authenticate <- function(url, remote_host_type, username, password) { + + args <- as.list(match.call())[-1] + path <- "/credentials/authenticate" + path_params <- list() + query_params <- list() + body_params <- list(url = url, remoteHostType = remote_host_type, username = username, password = password) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Generate a temporary credential for accessing S3 +#' @param id integer required. The ID of the credential. +#' @param duration integer optional. The number of seconds the temporary credential should be valid. Defaults to 15 minutes. Must not be less than 15 minutes or greater than 36 hours. +#' +#' @return A list containing the following elements: +#' \item{accessKey}{string, The identifier of the credential.} +#' \item{secretAccessKey}{string, The secret part of the credential.} +#' \item{sessionToken}{string, The session token identifier.} +#' @export +credentials_post_temporary <- function(id, duration = NULL) { + + args <- as.list(match.call())[-1] + path <- "/credentials/{id}/temporary" + path_params <- list(id = id) + query_params <- list() + body_params <- list(duration = duration) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object -#' @param slug string required. The slug for the application. #' @param id integer required. The ID of the resource that is shared. #' #' @return An array containing the following fields: @@ -1272,11 +1690,11 @@ apps_put_releases_archive <- function(id, slug, status) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -apps_list_instances_shares <- function(slug, id) { +credentials_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/shares" - path_params <- list(slug = slug, id = id) + path <- "/credentials/{id}/shares" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -1290,7 +1708,6 @@ apps_list_instances_shares <- function(slug, id) { #' Set the permissions users have on this object -#' @param slug string required. The slug for the application. #' @param id integer required. The ID of the resource that is shared. #' @param user_ids array required. An array of one or more user IDs. #' @param permission_level string required. Options are: "read", "write", or "manage". @@ -1316,11 +1733,11 @@ apps_list_instances_shares <- function(slug, id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -apps_put_instances_shares_users <- function(slug, id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +credentials_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/shares/users" - path_params <- list(slug = slug, id = id) + path <- "/credentials/{id}/shares/users" + path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] @@ -1334,17 +1751,16 @@ apps_put_instances_shares_users <- function(slug, id, user_ids, permission_level #' Revoke the permissions a user has on this object -#' @param slug string required. The slug for the application. #' @param id integer required. The ID of the resource that is shared. #' @param user_id integer required. The ID of the user. #' #' @return An empty HTTP response #' @export -apps_delete_instances_shares_users <- function(slug, id, user_id) { +credentials_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/shares/users/{user_id}" - path_params <- list(slug = slug, id = id, user_id = user_id) + path <- "/credentials/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -1358,7 +1774,6 @@ apps_delete_instances_shares_users <- function(slug, id, user_id) { #' Set the permissions groups has on this object -#' @param slug string required. The slug for the application. #' @param id integer required. The ID of the resource that is shared. #' @param group_ids array required. An array of one or more group IDs. #' @param permission_level string required. Options are: "read", "write", or "manage". @@ -1384,11 +1799,11 @@ apps_delete_instances_shares_users <- function(slug, id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -apps_put_instances_shares_groups <- function(slug, id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +credentials_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/shares/groups" - path_params <- list(slug = slug, id = id) + path <- "/credentials/{id}/shares/groups" + path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] @@ -1402,17 +1817,16 @@ apps_put_instances_shares_groups <- function(slug, id, group_ids, permission_lev #' Revoke the permissions a group has on this object -#' @param slug string required. The slug for the application. #' @param id integer required. The ID of the resource that is shared. #' @param group_id integer required. The ID of the group. 
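A brief sketch of the credential-sharing and temporary-credential endpoints above; the credential and user IDs are placeholders, and the one-hour duration is simply an example within the documented 15-minute-to-36-hour range.

# Grant two users read access to a credential, then mint a temporary S3 credential.
credentials_put_shares_users(
  id = 42,                     # credential ID (placeholder)
  user_ids = c(101, 102),      # user IDs (placeholders)
  permission_level = "read",
  send_shared_email = TRUE
)
tmp <- credentials_post_temporary(id = 42, duration = 3600)  # valid for one hour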
#' #' @return An empty HTTP response #' @export -apps_delete_instances_shares_groups <- function(slug, id, group_id) { +credentials_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/shares/groups/{group_id}" - path_params <- list(slug = slug, id = id, group_id = group_id) + path <- "/credentials/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -1425,170 +1839,133 @@ apps_delete_instances_shares_groups <- function(slug, id, group_id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param slug string required. The slug for the application. -#' @param status boolean required. The desired archived status of the object. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' -#' @return A list containing the following elements: -#' \item{id}{integer, The unique id of the instance.} -#' \item{name}{string, The name of the instance.} -#' \item{appReleaseId}{integer, The id of the app release the instance belongs to.} -#' \item{reportId}{integer, The id of the report the instance belongs to.} -#' \item{createdAt}{string, The time the instance was created at.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -#' \item{authCodeUrl}{string, } -#' \item{apiKey}{string, A Civis API key that can be used by this app instance.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -apps_put_instances_archive <- function(id, slug, status) { +credentials_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/archive" - path_params <- list(id = id, slug = slug) - query_params <- list() - body_params <- list(status = status) + path <- "/credentials/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List the projects an App Instance belongs to -#' @param id integer required. The ID of the App Instance. 
-#' @param slug string required. The slug for the application. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user #' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -apps_list_instances_projects <- function(id, slug, hidden = NULL) { +credentials_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/projects" - path_params <- list(id = id, slug = slug) - query_params <- list(hidden = hidden) - body_params <- list() + path <- "/credentials/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Add an App Instance to a project -#' @param id integer required. The ID of the App Instance. -#' @param project_id integer required. The ID of the project. -#' @param slug string required. The slug for the application. 
+#' List databases #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for the database.} +#' \item{name}{string, The name of the database.} +#' \item{adapter}{string, The type of the database.} #' @export -apps_put_instances_projects <- function(id, project_id, slug) { +databases_list <- function() { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id, slug = slug) + path <- "/databases/" + path_params <- list() query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Remove an App Instance from a project -#' @param id integer required. The ID of the App Instance. -#' @param project_id integer required. The ID of the project. -#' @param slug string required. The slug for the application. +#' Show database information +#' @param id integer required. The ID for the database. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the database.} +#' \item{name}{string, The name of the database.} +#' \item{adapter}{string, The type of the database.} #' @export -apps_delete_instances_projects <- function(id, project_id, slug) { +databases_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/apps/{slug}/instances/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id, slug = slug) + path <- "/databases/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List Kubernetes Clusters -#' @param organization_slug string optional. The slug of this cluster's organization. -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to organization_id. Must be one of: organization_id, created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' List schemas in this database +#' @param id integer required. The ID of the database. +#' @param name string optional. If specified, will be used to filter the schemas returned. Substring matching is supported (e.g., "name=schema" will return both "schema1" and "schema2"). +#' @param credential_id integer optional. If provided, schemas will be filtered based on the given credential. 
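Rounding out the database endpoints introduced above, a short usage sketch; the database ID and schema-name filter are hypothetical.

# Enumerate databases, inspect one, and filter its schemas by name.
dbs <- databases_list()
db <- databases_get(id = 32)                                   # hypothetical database ID
schemas <- databases_list_schemas(id = 32, name = "analytics") # substring match on schema name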
#' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of this cluster.} -#' \item{organizationId}{string, The id of this cluster's organization.} -#' \item{organizationName}{string, The name of this cluster's organization.} -#' \item{organizationSlug}{string, The slug of this cluster's organization.} -#' \item{clusterPartitions}{array, An array containing the following fields: -#' \itemize{ -#' \item clusterPartitionId integer, The ID of this cluster partition. -#' \item name string, The name of the cluster partition. -#' \item labels array, Labels associated with this partition. -#' \item instanceConfigs array, The instances configured for this cluster partition. -#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. -#' }} -#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} +#' \item{schema}{string, The name of a schema.} #' @export -clusters_list_kubernetes <- function(organization_slug = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +databases_list_schemas <- function(id, name = NULL, credential_id = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes" - path_params <- list() - query_params <- list(organization_slug = organization_slug, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/databases/{id}/schemas" + path_params <- list(id = id) + query_params <- list(name = name, credential_id = credential_id) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1600,34 +1977,22 @@ clusters_list_kubernetes <- function(organization_slug = NULL, limit = NULL, pag } -#' Create a Kubernetes Cluster -#' @param organization_id string optional. The id of this cluster's organization. -#' @param organization_slug string optional. The slug of this cluster's organization. -#' @param is_nat_enabled boolean optional. Whether this cluster needs a NAT gateway or not. +#' Creates and enqueues a schema scanner job +#' @param id integer required. The ID of the database. +#' @param schema string required. The name of the schema. +#' @param stats_priority string optional. When to sync table statistics for every table in the schema. Valid options are the following. Option: 'flag' means to flag stats for the next scheduled run of a full table scan on the database. Option: 'block' means to block this job on stats syncing. Option: 'queue' means to queue a separate job for syncing stats and do not block this job on the queued job. Defaults to 'flag' #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this cluster.} -#' \item{organizationId}{string, The id of this cluster's organization.} -#' \item{organizationName}{string, The name of this cluster's organization.} -#' \item{organizationSlug}{string, The slug of this cluster's organization.} -#' \item{clusterPartitions}{array, An array containing the following fields: -#' \itemize{ -#' \item clusterPartitionId integer, The ID of this cluster partition. -#' \item name string, The name of the cluster partition. -#' \item labels array, Labels associated with this partition. -#' \item instanceConfigs array, The instances configured for this cluster partition. -#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. 
-#' }} -#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -#' \item{hours}{number, The number of hours used this month for this cluster } +#' \item{jobId}{integer, The ID of the job created.} +#' \item{runId}{integer, The ID of the run created.} #' @export -clusters_post_kubernetes <- function(organization_id = NULL, organization_slug = NULL, is_nat_enabled = NULL) { +databases_post_schemas_scan <- function(id, schema, stats_priority = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes" - path_params <- list() + path <- "/databases/{id}/schemas/scan" + path_params <- list(id = id) query_params <- list() - body_params <- list(organizationId = organization_id, organizationSlug = organization_slug, isNatEnabled = is_nat_enabled) + body_params <- list(schema = schema, statsPriority = stats_priority) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -1638,32 +2003,51 @@ clusters_post_kubernetes <- function(organization_id = NULL, organization_slug = } -#' Describe a Kubernetes Cluster -#' @param id integer required. -#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. +#' List tables in the specified database, deprecated use "GET /tables" instead +#' @param id integer required. The ID of the database. +#' @param name string optional. If specified, will be used to filter the tables returned. Substring matching is supported (e.g., "name=table" will return both "table1" and "my table"). +#' @param limit integer optional. Number of results to return. Defaults to 200. Maximum allowed is 1000. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this cluster.} -#' \item{organizationId}{string, The id of this cluster's organization.} -#' \item{organizationName}{string, The name of this cluster's organization.} -#' \item{organizationSlug}{string, The slug of this cluster's organization.} -#' \item{clusterPartitions}{array, An array containing the following fields: +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the table.} +#' \item{databaseId}{integer, The ID of the database.} +#' \item{schema}{string, The name of the schema containing the table.} +#' \item{name}{string, Name of the table.} +#' \item{description}{string, The description of the table, as specified by the table owner} +#' \item{isView}{boolean, True if this table represents a view. 
False if it represents a regular table.} +#' \item{rowCount}{integer, The number of rows in the table.} +#' \item{columnCount}{integer, The number of columns in the table.} +#' \item{sizeMb}{number, The size of the table in megabytes.} +#' \item{owner}{string, The database username of the table's owner.} +#' \item{distkey}{string, The column used as the Amazon Redshift distkey.} +#' \item{sortkeys}{string, The column used as the Amazon Redshift sortkey.} +#' \item{refreshStatus}{string, How up-to-date the table's statistics on row counts, null counts, distinct counts, and values distributions are. One of: refreshing, stale, or current.} +#' \item{lastRefresh}{string, The time of the last statistics refresh.} +#' \item{refreshId}{string, The ID of the most recent statistics refresh.} +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item clusterPartitionId integer, The ID of this cluster partition. -#' \item name string, The name of the cluster partition. -#' \item labels array, Labels associated with this partition. -#' \item instanceConfigs array, The instances configured for this cluster partition. -#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name #' }} -#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -#' \item{hours}{number, The number of hours used this month for this cluster } #' @export -clusters_get_kubernetes <- function(id, include_usage_stats = NULL) { +databases_list_tables <- function(id, name = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}" + path <- "/databases/{id}/tables" path_params <- list(id = id) - query_params <- list(include_usage_stats = include_usage_stats) + query_params <- list(name = name, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1675,79 +2059,77 @@ clusters_get_kubernetes <- function(id, include_usage_stats = NULL) { } -#' Update a Kubernetes Cluster -#' @param id integer required. The ID of this cluster. -#' @param is_nat_enabled boolean optional. Whether this cluster needs a NAT gateway or not. +#' List tables in the specified database, deprecated use "GET /tables" instead +#' @param id integer required. The ID of the database. +#' @param name string optional. If specified, will be used to filter the tables returned. Substring matching is supported (e.g., "name=table" will return both "table1" and "my table"). +#' @param column_name string optional. Search for tables containing a column with the given name. 
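# ---- Editorial usage sketch (not part of the generated patch) --------------
# Hedged example of databases_post_schemas_scan() and databases_list_tables(),
# both defined above. Note that the tables endpoint is documented as
# deprecated in favor of "GET /tables". The database ID, schema, and name
# filter are hypothetical placeholders.
library(civis)
scan <- databases_post_schemas_scan(id = 32, schema = "public",
                                    stats_priority = "flag")
scan$jobId   # ID of the scanner job that was enqueued
scan$runId   # ID of the run created for that job

tables <- databases_list_tables(id = 32, name = "customers",
                                limit = 100, order = "name", order_dir = "asc")
# Qualified names, assuming a list of records as described in @return above.
vapply(tables, function(tbl) paste(tbl$schema, tbl$name, sep = "."),
       character(1))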
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this cluster.} -#' \item{organizationId}{string, The id of this cluster's organization.} -#' \item{organizationName}{string, The name of this cluster's organization.} -#' \item{organizationSlug}{string, The slug of this cluster's organization.} -#' \item{clusterPartitions}{array, An array containing the following fields: +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the table.} +#' \item{databaseId}{integer, The ID of the database.} +#' \item{schema}{string, The name of the schema containing the table.} +#' \item{name}{string, Name of the table.} +#' \item{description}{string, The description of the table, as specified by the table owner} +#' \item{isView}{boolean, True if this table represents a view. False if it represents a regular table.} +#' \item{rowCount}{integer, The number of rows in the table.} +#' \item{columnCount}{integer, The number of columns in the table.} +#' \item{sizeMb}{number, The size of the table in megabytes.} +#' \item{owner}{string, The database username of the table's owner.} +#' \item{distkey}{string, The column used as the Amazon Redshift distkey.} +#' \item{sortkeys}{string, The column used as the Amazon Redshift sortkey.} +#' \item{refreshStatus}{string, How up-to-date the table's statistics on row counts, null counts, distinct counts, and values distributions are. One of: refreshing, stale, or current.} +#' \item{lastRefresh}{string, The time of the last statistics refresh.} +#' \item{refreshId}{string, The ID of the most recent statistics refresh.} +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item clusterPartitionId integer, The ID of this cluster partition. -#' \item name string, The name of the cluster partition. -#' \item labels array, Labels associated with this partition. -#' \item instanceConfigs array, The instances configured for this cluster partition. -#' \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. 
#' }} -#' \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -#' \item{hours}{number, The number of hours used this month for this cluster } +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name +#' }} +#' \item{columnNames}{array, The names of each column in the table.} #' @export -clusters_patch_kubernetes <- function(id, is_nat_enabled = NULL) { +databases_list_tables_search <- function(id, name = NULL, column_name = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}" + path <- "/databases/{id}/tables-search" path_params <- list(id = id) - query_params <- list() - body_params <- list(isNatEnabled = is_nat_enabled) + query_params <- list(name = name, column_name = column_name) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List the deployments associated with a Kubernetes Cluster -#' @param id integer required. The id of the cluster. -#' @param base_type string optional. If specified, return deployments of these base types. It accepts a comma-separated list, possible values are 'Notebook', 'Service', 'Run'. -#' @param state string optional. If specified, return deployments in these states. It accepts a comma-separated list, possible values are pending, running, terminated, sleeping -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' Show table privileges +#' @param id integer required. The ID of the database +#' @param schema_name string required. The name of the schema +#' @param table_name string required. The name of the table #' -#' @return An array containing the following fields: -#' \item{id}{integer, The id of this deployment.} -#' \item{name}{string, The name of the deployment.} -#' \item{baseId}{integer, The id of the base object associated with the deployment.} -#' \item{baseType}{string, The base type of this deployment.} -#' \item{state}{string, The state of the deployment.} -#' \item{cpu}{integer, The CPU in millicores required by the deployment.} -#' \item{memory}{integer, The memory in MB required by the deployment.} -#' \item{diskSpace}{integer, The disk space in GB required by the deployment.} -#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } +#' @return A list containing the following elements: +#' \item{grantee}{string, Name of the granted user or group} +#' \item{granteeType}{string, User or group} +#' \item{privileges}{array, Privileges that the grantee has on this resource} +#' \item{grantablePrivileges}{array, Privileges that the grantee can grant to others for this resource} #' @export -clusters_list_kubernetes_deployments <- function(id, base_type = NULL, state = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +databases_get_table_privilegesschema_name <- function(id, schema_name, table_name) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/deployments" - path_params <- list(id = id) - query_params <- list(base_type = base_type, state = state, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/databases/{id}/table_privileges/{schema_name}/{table_name}" + path_params <- list(id = id, schema_name = schema_name, table_name = table_name) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1759,21 +2141,21 @@ clusters_list_kubernetes_deployments <- function(id, base_type = NULL, state = N } -#' Get stats about deployments associated with a Kubernetes Cluster -#' @param id integer required. The ID of this cluster. +#' Show schema privileges +#' @param id integer required. The ID of the database +#' @param schema_name string required. The name of the schema #' -#' @return An array containing the following fields: -#' \item{baseType}{string, The base type of this deployment} -#' \item{state}{string, State of the deployment} -#' \item{count}{integer, Number of deployments of base type and state} -#' \item{totalCpu}{integer, Total amount of CPU in millicores for deployments of base type and state} -#' \item{totalMemory}{integer, Total amount of Memory in megabytes for deployments of base type and state} +#' @return A list containing the following elements: +#' \item{grantee}{string, Name of the granted user or group} +#' \item{granteeType}{string, User or group} +#' \item{privileges}{array, Privileges that the grantee has on this resource} +#' \item{grantablePrivileges}{array, Privileges that the grantee can grant to others for this resource} #' @export -clusters_list_kubernetes_deployment_stats <- function(id) { +databases_get_schema_privileges <- function(id, schema_name) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/deployment_stats" - path_params <- list(id = id) + path <- "/databases/{id}/schema_privileges/{schema_name}" + path_params <- list(id = id, schema_name = schema_name) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -1786,33 +2168,23 @@ clusters_list_kubernetes_deployment_stats <- function(id) { } -#' List Cluster Partitions for given cluster -#' @param id integer required. -#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. +#' List whitelisted IPs for the specified database +#' @param id integer required. The ID for the database. 
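# ---- Editorial usage sketch (not part of the generated patch) --------------
# Hedged example of the table search and privilege endpoints defined above.
# The database ID, schema name, and table name are hypothetical.
library(civis)
hits <- databases_list_tables_search(id = 32, column_name = "email")
tbl_priv <- databases_get_table_privilegesschema_name(
  id = 32, schema_name = "public", table_name = "customers"
)
sch_priv <- databases_get_schema_privileges(id = 32, schema_name = "public")
tbl_priv$privileges           # privileges the grantee holds on the table
sch_priv$grantablePrivileges  # privileges the grantee can grant to others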
#' #' @return An array containing the following fields: -#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} -#' \item{name}{string, The name of the cluster partition.} -#' \item{labels}{array, Labels associated with this partition.} -#' \item{instanceConfigs}{array, An array containing the following fields: -#' \itemize{ -#' \item instanceConfigId integer, The ID of this InstanceConfig. -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. -#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. -#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. -#' \item usageStats object, -#' }} -#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} +#' \item{id}{integer, The ID of this whitelisted IP address.} +#' \item{remoteHostId}{integer, The ID of the database this rule is applied to.} +#' \item{securityGroupId}{string, The ID of the security group this rule is applied to.} +#' \item{subnetMask}{string, The subnet mask that is allowed by this rule.} +#' \item{createdAt}{string, The time this rule was created.} +#' \item{updatedAt}{string, The time this rule was last updated.} #' @export -clusters_list_kubernetes_partitions <- function(id, include_usage_stats = NULL) { +databases_list_whitelist_ips <- function(id) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/partitions" + path <- "/databases/{id}/whitelist-ips" path_params <- list(id = id) - query_params <- list(include_usage_stats = include_usage_stats) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -1824,219 +2196,118 @@ clusters_list_kubernetes_partitions <- function(id, include_usage_stats = NULL) } -#' Create a Cluster Partition for given cluster -#' @param id integer required. The ID of the cluster which this partition belongs to. -#' @param instance_configs array required. An array containing the following fields: -#' \itemize{ -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' } -#' @param name string required. The name of the cluster partition. -#' @param labels array required. Labels associated with this partition. +#' View details about a whitelisted IP +#' @param id integer required. The ID of the database this rule is applied to. +#' @param whitelisted_ip_id integer required. The ID of this whitelisted IP address. 
#' #' @return A list containing the following elements: -#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} -#' \item{name}{string, The name of the cluster partition.} -#' \item{labels}{array, Labels associated with this partition.} -#' \item{instanceConfigs}{array, An array containing the following fields: -#' \itemize{ -#' \item instanceConfigId integer, The ID of this InstanceConfig. -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. -#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. -#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. -#' \item usageStats object, -#' }} -#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} +#' \item{id}{integer, The ID of this whitelisted IP address.} +#' \item{remoteHostId}{integer, The ID of the database this rule is applied to.} +#' \item{securityGroupId}{string, The ID of the security group this rule is applied to.} +#' \item{subnetMask}{string, The subnet mask that is allowed by this rule.} +#' \item{authorizedBy}{string, The user who authorized this rule.} +#' \item{isActive}{boolean, True if the rule is applied, false if it has been revoked.} +#' \item{createdAt}{string, The time this rule was created.} +#' \item{updatedAt}{string, The time this rule was last updated.} #' @export -clusters_post_kubernetes_partitions <- function(id, instance_configs, name, labels) { +databases_get_whitelist_ips <- function(id, whitelisted_ip_id) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/partitions" - path_params <- list(id = id) + path <- "/databases/{id}/whitelist-ips/{whitelisted_ip_id}" + path_params <- list(id = id, whitelisted_ip_id = whitelisted_ip_id) query_params <- list() - body_params <- list(instanceConfigs = instance_configs, name = name, labels = labels) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update a Cluster Partition -#' @param id integer required. The ID of the cluster which this partition belongs to. -#' @param cluster_partition_id integer required. The ID of this cluster partition. -#' @param instance_configs array optional. An array containing the following fields: -#' \itemize{ -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' } -#' @param name string optional. The name of the cluster partition. -#' @param labels array optional. Labels associated with this partition. 
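# ---- Editorial usage sketch (not part of the generated patch) --------------
# Hedged example of the whitelisted-IP endpoints defined above; the database
# ID is hypothetical and the first rule in the listing is picked arbitrarily.
library(civis)
rules <- databases_list_whitelist_ips(id = 32)
rule <- databases_get_whitelist_ips(id = 32,
                                    whitelisted_ip_id = rules[[1]]$id)
rule$subnetMask  # the subnet mask allowed by this rule
rule$isActive    # FALSE once the rule has been revoked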
+#' Get the advanced settings for this database +#' @param id integer required. The ID of the database this advanced settings object belongs to. #' #' @return A list containing the following elements: -#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} -#' \item{name}{string, The name of the cluster partition.} -#' \item{labels}{array, Labels associated with this partition.} -#' \item{instanceConfigs}{array, An array containing the following fields: -#' \itemize{ -#' \item instanceConfigId integer, The ID of this InstanceConfig. -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. -#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. -#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. -#' \item usageStats object, -#' }} -#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} -#' @export -clusters_patch_kubernetes_partitions <- function(id, cluster_partition_id, instance_configs = NULL, name = NULL, labels = NULL) { - - args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" - path_params <- list(id = id, cluster_partition_id = cluster_partition_id) - query_params <- list() - body_params <- list(instanceConfigs = instance_configs, name = name, labels = labels) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Delete a Cluster Partition -#' @param id integer required. The ID of the cluster which this partition belongs to. -#' @param cluster_partition_id integer required. The ID of this cluster partition. -#' -#' @return An empty HTTP response +#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} #' @export -clusters_delete_kubernetes_partitions <- function(id, cluster_partition_id) { +databases_list_advanced_settings <- function(id) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" - path_params <- list(id = id, cluster_partition_id = cluster_partition_id) + path <- "/databases/{id}/advanced-settings" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Describe a Cluster Partition -#' @param id integer required. The ID of the cluster which this partition belongs to. -#' @param cluster_partition_id integer required. The ID of this cluster partition. -#' @param include_usage_stats boolean optional. 
When true, usage stats are returned in instance config objects. Defaults to false. +#' Update the advanced settings for this database +#' @param id integer required. The ID of the database this advanced settings object belongs to. +#' @param export_caching_enabled boolean optional. Whether or not caching is enabled for export jobs run on this database server. #' #' @return A list containing the following elements: -#' \item{clusterPartitionId}{integer, The ID of this cluster partition.} -#' \item{name}{string, The name of the cluster partition.} -#' \item{labels}{array, Labels associated with this partition.} -#' \item{instanceConfigs}{array, An array containing the following fields: -#' \itemize{ -#' \item instanceConfigId integer, The ID of this InstanceConfig. -#' \item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. -#' \item minInstances integer, The minimum number of instances of that type in this cluster. -#' \item maxInstances integer, The maximum number of instances of that type in this cluster. -#' \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. -#' \item instanceMaxCpu integer, The number of processor shares available to a single instance of that type in millicores. -#' \item instanceMaxDisk integer, The amount of disk available to a single instance of that type in gigabytes. -#' \item usageStats object, -#' }} -#' \item{defaultInstanceConfigId}{integer, The id of the InstanceConfig that is the default for this partition.} +#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} #' @export -clusters_get_kubernetes_partitions <- function(id, cluster_partition_id, include_usage_stats = NULL) { +databases_patch_advanced_settings <- function(id, export_caching_enabled = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/{id}/partitions/{cluster_partition_id}" - path_params <- list(id = id, cluster_partition_id = cluster_partition_id) - query_params <- list(include_usage_stats = include_usage_stats) - body_params <- list() + path <- "/databases/{id}/advanced-settings" + path_params <- list(id = id) + query_params <- list() + body_params <- list(exportCachingEnabled = export_caching_enabled) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Describe an Instance Config -#' @param instance_config_id integer required. The ID of this instance config. -#' @param include_usage_stats boolean optional. When true, usage stats are returned in instance config objects. Defaults to false. +#' Edit the advanced settings for this database +#' @param id integer required. The ID of the database this advanced settings object belongs to. +#' @param export_caching_enabled boolean required. Whether or not caching is enabled for export jobs run on this database server. #' #' @return A list containing the following elements: -#' \item{instanceConfigId}{integer, The ID of this InstanceConfig.} -#' \item{instanceType}{string, An EC2 instance type. 
Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge.} -#' \item{minInstances}{integer, The minimum number of instances of that type in this cluster.} -#' \item{maxInstances}{integer, The maximum number of instances of that type in this cluster.} -#' \item{instanceMaxMemory}{integer, The amount of memory (RAM) available to a single instance of that type in megabytes.} -#' \item{instanceMaxCpu}{integer, The number of processor shares available to a single instance of that type in millicores.} -#' \item{instanceMaxDisk}{integer, The amount of disk available to a single instance of that type in gigabytes.} -#' \item{usageStats}{list, A list containing the following elements: -#' \itemize{ -#' \item pendingMemoryRequested integer, The sum of memory requests (in MB) for pending deployments in this instance config. -#' \item pendingCpuRequested integer, The sum of cpu requests (in millicores) for pending deployments in this instance config. -#' \item runningMemoryRequested integer, The sum of memory requests (in MB) for running deployments in this instance config. -#' \item runningCpuRequested integer, The sum of cpu requests (in millicores) for running deployments in this instance config. -#' \item pendingDeployments integer, The number of pending deployments in this instance config. -#' \item runningDeployments integer, The number of running deployments in this instance config. -#' }} +#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} #' @export -clusters_get_kubernetes_instance_configs <- function(instance_config_id, include_usage_stats = NULL) { +databases_put_advanced_settings <- function(id, export_caching_enabled) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/instance_configs/{instance_config_id}" - path_params <- list(instance_config_id = instance_config_id) - query_params <- list(include_usage_stats = include_usage_stats) - body_params <- list() + path <- "/databases/{id}/advanced-settings" + path_params <- list(id = id) + query_params <- list() + body_params <- list(exportCachingEnabled = export_caching_enabled) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get statistics about the current users of an Instance Config -#' @param instance_config_id integer required. The ID of this instance config. -#' @param order string optional. The field on which to order the result set. Defaults to running_deployments. Must be one of pending_memory_requested, pending_cpu_requested, running_memory_requested, running_cpu_requested, pending_deployments, running_deployments. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending). Defaults to desc. 
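# ---- Editorial usage sketch (not part of the generated patch) --------------
# Hedged example of the advanced-settings endpoints defined above. The
# database ID is hypothetical; PATCH sends only the fields supplied, while
# PUT (the "Edit" endpoint) requires export_caching_enabled.
library(civis)
settings <- databases_list_advanced_settings(id = 32)
settings$exportCachingEnabled
databases_patch_advanced_settings(id = 32, export_caching_enabled = TRUE)
databases_put_advanced_settings(id = 32, export_caching_enabled = FALSE)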
+#' List API endpoints #' -#' @return An array containing the following fields: -#' \item{userId}{string, The owning user's ID} -#' \item{userName}{string, The owning user's name} -#' \item{pendingDeployments}{integer, The number of deployments belonging to the owning user in "pending" state} -#' \item{pendingMemoryRequested}{integer, The sum of memory requests (in MB) for deployments belonging to the owning user in "pending" state} -#' \item{pendingCpuRequested}{integer, The sum of CPU requests (in millicores) for deployments belonging to the owning user in "pending" state} -#' \item{runningDeployments}{integer, The number of deployments belonging to the owning user in "running" state} -#' \item{runningMemoryRequested}{integer, The sum of memory requests (in MB) for deployments belonging to the owning user in "running" state} -#' \item{runningCpuRequested}{integer, The sum of CPU requests (in millicores) for deployments belonging to the owning user in "running" state} +#' @return An empty HTTP response #' @export -clusters_list_kubernetes_instance_configs_user_statistics <- function(instance_config_id, order = NULL, order_dir = NULL) { +endpoints_list <- function() { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/instance_configs/{instance_config_id}/user_statistics" - path_params <- list(instance_config_id = instance_config_id) - query_params <- list(order = order, order_dir = order_dir) + path <- "/endpoints/" + path_params <- list() + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -2048,59 +2319,223 @@ clusters_list_kubernetes_instance_configs_user_statistics <- function(instance_c } -#' Get graphs of historical resource usage in an Instance Config -#' @param instance_config_id integer required. The ID of this instance config. -#' @param timeframe string optional. The span of time that the graphs cover. Must be one of 1_day, 1_week. +#' Create a Civis Data Match Enhancement +#' @param name string required. The name of the enhancement job. +#' @param input_field_mapping list required. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}). +#' @param input_table list required. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' } +#' @param match_target_id integer required. The ID of the Civis Data match target. See /match_targets for IDs. +#' @param output_table list required. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' } +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. +#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5. +#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. #' #' @return A list containing the following elements: -#' \item{cpuGraphUrl}{string, URL for the graph of historical CPU usage in this instance config.} -#' \item{memGraphUrl}{string, URL for the graph of historical memory usage in this instance config.} +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
+#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. 
+#' }} #' @export -clusters_list_kubernetes_instance_configs_historical_graphs <- function(instance_config_id, timeframe = NULL) { +enhancements_post_civis_data_match <- function(name, input_field_mapping, input_table, match_target_id, output_table, schedule = NULL, parent_id = NULL, notifications = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/clusters/kubernetes/instance_configs/{instance_config_id}/historical_graphs" - path_params <- list(instance_config_id = instance_config_id) - query_params <- list(timeframe = timeframe) - body_params <- list() + path <- "/enhancements/civis-data-match/" + path_params <- list() + query_params <- list() + body_params <- list(name = name, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, schedule = schedule, parentId = parent_id, notifications = notifications, maxMatches = max_matches, threshold = threshold, archived = archived) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List credentials -#' @param type string optional. The type (or types) of credentials to return. One or more of: Amazon Web Services S3, Bitbucket, CASS/NCOA PAF, Certificate, Civis Platform, Custom, Database, Google, Github, Salesforce User, Salesforce Client, and TableauUser. Specify multiple values as a comma-separated list (e.g., "A,B"). -#' @param remote_host_id integer optional. The ID of the remote host associated with the credentials to return. -#' @param default boolean optional. If true, will return a list with a single credential which is the current user's default credential. -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 1000. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, created_at, name. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Get a Civis Data Match Enhancement +#' @param id integer required. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the credential.} -#' \item{name}{string, The name identifying the credential} -#' \item{type}{string, The credential's type.} -#' \item{username}{string, The username for the credential.} -#' \item{description}{string, A long description of the credential.} -#' \item{owner}{string, The name of the user who this credential belongs to.} -#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} -#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} -#' \item{state}{string, The U.S. state for the credential. 
Only for VAN credentials.} -#' \item{createdAt}{string, The creation time for this credential.} -#' \item{updatedAt}{string, The last modification time for this credential.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. 
For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} #' @export -credentials_list <- function(type = NULL, remote_host_id = NULL, default = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +enhancements_get_civis_data_match <- function(id) { args <- as.list(match.call())[-1] - path <- "/credentials/" - path_params <- list() - query_params <- list(type = type, remote_host_id = remote_host_id, default = default, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/enhancements/civis-data-match/{id}" + path_params <- list(id = id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -2112,113 +2547,430 @@ credentials_list <- function(type = NULL, remote_host_id = NULL, default = NULL, } -#' Create a credential -#' @param type string required. -#' @param username string required. The username for the credential. -#' @param password string required. The password for the credential. -#' @param name string optional. The name identifying the credential. -#' @param description string optional. A long description of the credential. -#' @param remote_host_id integer optional. The ID of the remote host associated with the credential. -#' @param state string optional. The U.S. state for the credential. Only for VAN credentials. -#' @param system_credential boolean optional. +#' Replace all attributes of this Civis Data Match Enhancement +#' @param id integer required. The ID for the enhancement. +#' @param name string required. The name of the enhancement job. +#' @param input_field_mapping list required. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. 
For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}). +#' @param input_table list required. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' } +#' @param match_target_id integer required. The ID of the Civis Data match target. See /match_targets for IDs. +#' @param output_table list required. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' } +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. +#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5. +#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. 
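# ---- Editorial usage sketch (not part of the generated patch) --------------
# Hedged example of creating and then fetching a Civis Data Match enhancement
# with enhancements_post_civis_data_match() and
# enhancements_get_civis_data_match(), both defined above. All names, IDs,
# tables, and field-mapping keys are hypothetical placeholders; see the
# field-mapping reference linked in the documentation for valid field types.
library(civis)
job <- enhancements_post_civis_data_match(
  name = "match customers",
  input_field_mapping = list(phone = list("home_phone", "mobile_phone")),
  input_table = list(databaseName = "redshift-general",
                     schema = "public", table = "customers"),
  match_target_id = 1,
  output_table = list(databaseName = "redshift-general",
                      schema = "public", table = "customers_matched"),
  max_matches = 1,
  threshold = 0.5
)
enhancements_get_civis_data_match(id = job$id)$state  # status of the last run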
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the credential.} -#' \item{name}{string, The name identifying the credential} -#' \item{type}{string, The credential's type.} -#' \item{username}{string, The username for the credential.} -#' \item{description}{string, A long description of the credential.} -#' \item{owner}{string, The name of the user who this credential belongs to.} -#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} -#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} -#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} -#' \item{createdAt}{string, The creation time for this credential.} -#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. 
+#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} #' @export -credentials_post <- function(type, username, password, name = NULL, description = NULL, remote_host_id = NULL, state = NULL, system_credential = NULL) { +enhancements_put_civis_data_match <- function(id, name, input_field_mapping, input_table, match_target_id, output_table, schedule = NULL, parent_id = NULL, notifications = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/credentials/" - path_params <- list() + path <- "/enhancements/civis-data-match/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(type = type, username = username, password = password, name = name, description = description, remoteHostId = remote_host_id, state = state, systemCredential = system_credential) + body_params <- list(name = name, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, schedule = schedule, parentId = parent_id, notifications = notifications, maxMatches = max_matches, threshold = threshold, archived = archived) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Update an existing credential -#' @param id integer required. The ID of the credential. 
-#' @param type string required. -#' @param username string required. The username for the credential. -#' @param password string required. The password for the credential. -#' @param name string optional. The name identifying the credential. -#' @param description string optional. A long description of the credential. -#' @param remote_host_id integer optional. The ID of the remote host associated with the credential. -#' @param state string optional. The U.S. state for the credential. Only for VAN credentials. -#' @param system_credential boolean optional. +#' Update some attributes of this Civis Data Match Enhancement +#' @param id integer required. The ID for the enhancement. +#' @param name string optional. The name of the enhancement job. +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param input_field_mapping list optional. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}). +#' @param input_table list optional. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' } +#' @param match_target_id integer optional. The ID of the Civis Data match target. See /match_targets for IDs. +#' @param output_table list optional. A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. 
+#' \item table string, The table name. +#' } +#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. +#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5. +#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the credential.} -#' \item{name}{string, The name identifying the credential} -#' \item{type}{string, The credential's type.} -#' \item{username}{string, The username for the credential.} -#' \item{description}{string, A long description of the credential.} -#' \item{owner}{string, The name of the user who this credential belongs to.} -#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} -#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} -#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} -#' \item{createdAt}{string, The creation time for this credential.} -#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. 
+#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. 
+#' }} #' @export -credentials_put <- function(id, type, username, password, name = NULL, description = NULL, remote_host_id = NULL, state = NULL, system_credential = NULL) { +enhancements_patch_civis_data_match <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, input_field_mapping = NULL, input_table = NULL, match_target_id = NULL, output_table = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}" + path <- "/enhancements/civis-data-match/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(type = type, username = username, password = password, name = name, description = description, remoteHostId = remote_host_id, state = state, systemCredential = system_credential) + body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, maxMatches = max_matches, threshold = threshold, archived = archived) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Get a credential -#' @param id integer required. The ID of the credential. +#' Clone this Civis Data Match Enhancement +#' @param id integer required. The ID for the enhancement. +#' @param clone_schedule boolean optional. If true, also copy the schedule to the new enhancement. +#' @param clone_triggers boolean optional. If true, also copy the triggers to the new enhancement. +#' @param clone_notifications boolean optional. If true, also copy the notifications to the new enhancement. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the credential.} -#' \item{name}{string, The name identifying the credential} -#' \item{type}{string, The credential's type.} -#' \item{username}{string, The username for the credential.} -#' \item{description}{string, A long description of the credential.} -#' \item{owner}{string, The name of the user who this credential belongs to.} -#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} -#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} -#' \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} -#' \item{createdAt}{string, The creation time for this credential.} -#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
+#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. 
The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} #' @export -credentials_get <- function(id) { +enhancements_post_civis_data_match_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}" + path <- "/enhancements/civis-data-match/{id}/clone" + path_params <- list(id = id) + query_params <- list() + body_params <- list(cloneSchedule = clone_schedule, cloneTriggers = clone_triggers, cloneNotifications = clone_notifications) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Start a run +#' @param id integer required. The ID of the civis_data_match. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} +#' @export +enhancements_post_civis_data_match_runs <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/civis-data-match/{id}/runs" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List runs for the given civis_data_match +#' @param id integer required. The ID of the civis_data_match. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
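# Sketch (not from this patch) of starting a run and then listing recent runs
# for a Civis Data Match Enhancement; 123 is a placeholder enhancement ID.
run <- enhancements_post_civis_data_match_runs(id = 123)
runs <- enhancements_list_civis_data_match_runs(id = 123, limit = 20,
                                                order = "id", order_dir = "desc")
# Assuming the list endpoint returns one list per run, inspect the newest state:
latest_state <- runs[[1]]$state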
+#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the run.} +#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} +#' @export +enhancements_list_civis_data_match_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/civis-data-match/{id}/runs" + path_params <- list(id = id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) @@ -2226,58 +2978,104 @@ credentials_get <- function(id) { } -#' Authenticate against a remote host -#' @param url string required. The URL to your host. -#' @param remote_host_type string required. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce -#' @param username string required. The username for the credential. -#' @param password string required. The password for the credential. +#' Check status of a run +#' @param id integer required. The ID of the civis_data_match. +#' @param run_id integer required. The ID of the run. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the credential.} -#' \item{name}{string, The name identifying the credential} -#' \item{type}{string, The credential's type.} -#' \item{username}{string, The username for the credential.} -#' \item{description}{string, A long description of the credential.} -#' \item{owner}{string, The name of the user who this credential belongs to.} -#' \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} -#' \item{remoteHostName}{string, The name of the remote host associated with this credential.} -#' \item{state}{string, The U.S. state for the credential. 
Only for VAN credentials.} -#' \item{createdAt}{string, The creation time for this credential.} -#' \item{updatedAt}{string, The last modification time for this credential.} +#' \item{id}{integer, The ID of the run.} +#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -credentials_post_authenticate <- function(url, remote_host_type, username, password) { +enhancements_get_civis_data_match_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/credentials/authenticate" - path_params <- list() + path <- "/enhancements/civis-data-match/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() - body_params <- list(url = url, remoteHostType = remote_host_type, username = username, password = password) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Generate a temporary credential for accessing S3 -#' @param id integer required. The ID of the credential. -#' @param duration integer optional. The number of seconds the temporary credential should be valid. Defaults to 15 minutes. Must not be less than 15 minutes or greater than 36 hours. +#' Cancel a run +#' @param id integer required. The ID of the civis_data_match. +#' @param run_id integer required. The ID of the run. +#' +#' @return An empty HTTP response +#' @export +enhancements_delete_civis_data_match_runs <- function(id, run_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/civis-data-match/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the logs for a run +#' @param id integer required. The ID of the civis_data_match. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} +#' @export +enhancements_list_civis_data_match_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/civis-data-match/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Cancel a run +#' @param id integer required. The ID of the job. #' #' @return A list containing the following elements: -#' \item{accessKey}{string, The identifier of the credential.} -#' \item{secretAccessKey}{string, The secret part of the credential.} -#' \item{sessionToken}{string, The session token identifier.} +#' \item{id}{integer, The ID of the run.} +#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} #' @export -credentials_post_temporary <- function(id, duration = NULL) { +enhancements_post_civis_data_match_cancel <- function(id) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/temporary" + path <- "/enhancements/civis-data-match/{id}/cancel" path_params <- list(id = id) query_params <- list() - body_params <- list(duration = duration) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -2288,6 +3086,38 @@ credentials_post_temporary <- function(id, duration = NULL) { } +#' List the outputs for a run +#' @param id integer required. The ID of the job. +#' @param run_id integer required. The ID of the run. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, The type of the output. 
Valid values are File, Table, Report, Project, Credential, or JSONValue} +#' \item{objectId}{integer, The ID of the output.} +#' \item{name}{string, The name of the output.} +#' \item{link}{string, The hypermedia link to the output.} +#' \item{value}{string, } +#' @export +enhancements_list_civis_data_match_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/civis-data-match/{id}/runs/{run_id}/outputs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -2310,10 +3140,10 @@ credentials_post_temporary <- function(id, duration = NULL) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -credentials_list_shares <- function(id) { +enhancements_list_civis_data_match_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/shares" + path <- "/enhancements/civis-data-match/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -2353,10 +3183,10 @@ credentials_list_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -credentials_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +enhancements_put_civis_data_match_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/shares/users" + path <- "/enhancements/civis-data-match/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -2376,10 +3206,10 @@ credentials_put_shares_users <- function(id, user_ids, permission_level, share_e #' #' @return An empty HTTP response #' @export -credentials_delete_shares_users <- function(id, user_id) { +enhancements_delete_civis_data_match_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/shares/users/{user_id}" + path <- "/enhancements/civis-data-match/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -2419,10 +3249,10 @@ credentials_delete_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. 
For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -credentials_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +enhancements_put_civis_data_match_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/shares/groups" + path <- "/enhancements/civis-data-match/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -2442,10 +3272,10 @@ credentials_put_shares_groups <- function(id, group_ids, permission_level, share #' #' @return An empty HTTP response #' @export -credentials_delete_shares_groups <- function(id, group_id) { +enhancements_delete_civis_data_match_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/credentials/{id}/shares/groups/{group_id}" + path <- "/enhancements/civis-data-match/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -2459,67 +3289,24 @@ credentials_delete_shares_groups <- function(id, group_id) { } -#' List databases +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for the database.} -#' \item{name}{string, The name of the database.} -#' \item{adapter}{string, The type of the database.} +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -databases_list <- function() { +enhancements_list_civis_data_match_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/databases/" - path_params <- list() - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Show database information -#' @param id integer required. The ID for the database. 
-#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the database.} -#' \item{name}{string, The name of the database.} -#' \item{adapter}{string, The type of the database.} -#' @export -databases_get <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/databases/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' List schemas in this database -#' @param id integer required. The ID of the database. -#' -#' @return An array containing the following fields: -#' \item{schema}{string, The name of a schema.} -#' @export -databases_list_schemas <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/databases/{id}/schemas" + path <- "/enhancements/civis-data-match/{id}/dependencies" path_params <- list(id = id) - query_params <- list() + query_params <- list(user_id = user_id) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -2531,104 +3318,109 @@ databases_list_schemas <- function(id) { } -#' Creates and enqueues a schema scanner job -#' @param id integer required. The ID of the database. -#' @param schema string required. The name of the schema. -#' @param stats_priority string optional. When to sync table statistics for every table in the schema. Valid options are the following. Option: 'flag' means to flag stats for the next scheduled run of a full table scan on the database. Option: 'block' means to block this job on stats syncing. Option: 'queue' means to queue a separate job for syncing stats and do not block this job on the queued job. Defaults to 'flag' +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
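# Hedged sketch of inspecting dependencies and then transferring ownership of a
# Civis Data Match Enhancement; 123 and 789 are placeholder enhancement and
# target user IDs, and the email body is illustrative.
deps <- enhancements_list_civis_data_match_dependencies(id = 123)
resp <- enhancements_put_civis_data_match_transfer(
  id = 123,
  user_id = 789,
  include_dependencies = TRUE,
  email_body = "Handing this match job over to you.",
  send_email = TRUE
)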
#' #' @return A list containing the following elements: -#' \item{jobId}{integer, The ID of the job created.} -#' \item{runId}{integer, The ID of the run created.} +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -databases_post_schemas_scan <- function(id, schema, stats_priority = NULL) { +enhancements_put_civis_data_match_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/databases/{id}/schemas/scan" + path <- "/enhancements/civis-data-match/{id}/transfer" path_params <- list(id = id) query_params <- list() - body_params <- list(schema = schema, statsPriority = stats_priority) + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List tables in the specified database, deprecated use "GET /tables" instead -#' @param id integer required. The ID of the database. -#' @param name string optional. If specified, will be used to filter the tables returned. Substring matching is supported (e.g., "name=table" will return both "table1" and "my table"). -#' @param limit integer optional. Number of results to return. Defaults to 200. Maximum allowed is 1000. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the table.} -#' \item{databaseId}{integer, The ID of the database.} -#' \item{schema}{string, The name of the schema containing the table.} -#' \item{name}{string, Name of the table.} -#' \item{description}{string, The description of the table, as specified by the table owner} -#' \item{isView}{boolean, True if this table represents a view. 
False if it represents a regular table.} -#' \item{rowCount}{integer, The number of rows in the table.} -#' \item{columnCount}{integer, The number of columns in the table.} -#' \item{sizeMb}{number, The size of the table in megabytes.} -#' \item{owner}{string, The database username of the table's owner.} -#' \item{distkey}{string, The column used as the Amazon Redshift distkey.} -#' \item{sortkeys}{string, The column used as the Amazon Redshift sortkey.} -#' \item{refreshStatus}{string, How up-to-date the table's statistics on row counts, null counts, distinct counts, and values distributions are. One of: refreshing, stale, or current.} -#' \item{lastRefresh}{string, The time of the last statistics refresh.} -#' \item{refreshId}{string, The ID of the most recent statistics refresh.} -#' \item{lastRun}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' @export -databases_list_tables <- function(id, name = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { - - args <- as.list(match.call())[-1] - path <- "/databases/{id}/tables" - path_params <- list(id = id) - query_params <- list(name = name, limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' List tables in the specified database, deprecated use "GET /tables" instead -#' @param id integer required. The ID of the database. -#' @param name string optional. If specified, will be used to filter the tables returned. Substring matching is supported (e.g., "name=table" will return both "table1" and "my table"). -#' @param column_name string optional. Search for tables containing a column with the given name. -#' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the table.} -#' \item{databaseId}{integer, The ID of the database.} -#' \item{schema}{string, The name of the schema containing the table.} -#' \item{name}{string, Name of the table.} -#' \item{description}{string, The description of the table, as specified by the table owner} -#' \item{isView}{boolean, True if this table represents a view. 
False if it represents a regular table.} -#' \item{rowCount}{integer, The number of rows in the table.} -#' \item{columnCount}{integer, The number of columns in the table.} -#' \item{sizeMb}{number, The size of the table in megabytes.} -#' \item{owner}{string, The database username of the table's owner.} -#' \item{distkey}{string, The column used as the Amazon Redshift distkey.} -#' \item{sortkeys}{string, The column used as the Amazon Redshift sortkey.} -#' \item{refreshStatus}{string, How up-to-date the table's statistics on row counts, null counts, distinct counts, and values distributions are. One of: refreshing, stale, or current.} -#' \item{lastRefresh}{string, The time of the last statistics refresh.} -#' \item{refreshId}{string, The ID of the most recent statistics refresh.} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. 
For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} +#' \item{inputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} +#' \item{outputTable}{list, A list containing the following elements: +#' \itemize{ +#' \item databaseName string, The Redshift database name for the table. +#' \item schema string, The schema name for the table. +#' \item table string, The table name. +#' }} +#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} +#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} +#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} #' \item{lastRun}{list, A list containing the following elements: #' \itemize{ #' \item id integer, @@ -2638,42 +3430,59 @@ databases_list_tables <- function(id, name = NULL, limit = NULL, page_num = NULL #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} -#' \item{columnNames}{array, The names of each column in the table.} #' @export -databases_list_tables_search <- function(id, name = NULL, column_name = NULL) { +enhancements_put_civis_data_match_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/databases/{id}/tables-search" + path <- "/enhancements/civis-data-match/{id}/archive" path_params <- list(id = id) - query_params <- list(name = name, column_name = column_name) - body_params <- list() + query_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List whitelisted IPs for the specified database -#' @param id integer required. The ID for the database. +#' List the projects a Civis Data Match Enhancement belongs to +#' @param id integer required. The ID of the Civis Data Match Enhancement. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of this whitelisted IP address.} -#' \item{remoteHostId}{integer, The ID of the database this rule is applied to.} -#' \item{securityGroupId}{string, The ID of the security group this rule is applied to.} -#' \item{subnetMask}{string, The subnet mask that is allowed by this rule.} -#' \item{createdAt}{string, The time this rule was created.} -#' \item{updatedAt}{string, The time this rule was last updated.} +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. 
+#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -databases_list_whitelist_ips <- function(id) { +enhancements_list_civis_data_match_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/databases/{id}/whitelist-ips" + path <- "/enhancements/civis-data-match/{id}/projects" path_params <- list(id = id) - query_params <- list() + query_params <- list(hidden = hidden) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -2685,79 +3494,40 @@ databases_list_whitelist_ips <- function(id) { } -#' Whitelist an IP address -#' @param id integer required. The ID of the database this rule is applied to. -#' @param subnet_mask string required. The subnet mask that is allowed by this rule. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this whitelisted IP address.} -#' \item{remoteHostId}{integer, The ID of the database this rule is applied to.} -#' \item{securityGroupId}{string, The ID of the security group this rule is applied to.} -#' \item{subnetMask}{string, The subnet mask that is allowed by this rule.} -#' \item{authorizedBy}{string, The user who authorized this rule.} -#' \item{isActive}{boolean, True if the rule is applied, false if it has been revoked.} -#' \item{createdAt}{string, The time this rule was created.} -#' \item{updatedAt}{string, The time this rule was last updated.} -#' @export -databases_post_whitelist_ips <- function(id, subnet_mask) { - - args <- as.list(match.call())[-1] - path <- "/databases/{id}/whitelist-ips" - path_params <- list(id = id) - query_params <- list() - body_params <- list(subnetMask = subnet_mask) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' View details about a whitelisted IP -#' @param id integer required. The ID of the database this rule is applied to. -#' @param whitelisted_ip_id integer required. The ID of this whitelisted IP address. +#' Add a Civis Data Match Enhancement to a project +#' @param id integer required. The ID of the Civis Data Match Enhancement. +#' @param project_id integer required. The ID of the project. 
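# Sketch of adding the enhancement to a project and listing the projects it
# belongs to; 123 and 42 are placeholder enhancement and project IDs.
enhancements_put_civis_data_match_projects(id = 123, project_id = 42)
projects <- enhancements_list_civis_data_match_projects(id = 123, hidden = FALSE)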
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this whitelisted IP address.} -#' \item{remoteHostId}{integer, The ID of the database this rule is applied to.} -#' \item{securityGroupId}{string, The ID of the security group this rule is applied to.} -#' \item{subnetMask}{string, The subnet mask that is allowed by this rule.} -#' \item{authorizedBy}{string, The user who authorized this rule.} -#' \item{isActive}{boolean, True if the rule is applied, false if it has been revoked.} -#' \item{createdAt}{string, The time this rule was created.} -#' \item{updatedAt}{string, The time this rule was last updated.} +#' @return An empty HTTP response #' @export -databases_get_whitelist_ips <- function(id, whitelisted_ip_id) { +enhancements_put_civis_data_match_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/databases/{id}/whitelist-ips/{whitelisted_ip_id}" - path_params <- list(id = id, whitelisted_ip_id = whitelisted_ip_id) + path <- "/enhancements/civis-data-match/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Remove a whitelisted IP address -#' @param id integer required. The ID of the database this rule is applied to. -#' @param whitelisted_ip_id integer required. The ID of this whitelisted IP address. +#' Remove a Civis Data Match Enhancement from a project +#' @param id integer required. The ID of the Civis Data Match Enhancement. +#' @param project_id integer required. The ID of the project. #' #' @return An empty HTTP response #' @export -databases_delete_whitelist_ips <- function(id, whitelisted_ip_id) { +enhancements_delete_civis_data_match_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/databases/{id}/whitelist-ips/{whitelisted_ip_id}" - path_params <- list(id = id, whitelisted_ip_id = whitelisted_ip_id) + path <- "/enhancements/civis-data-match/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -2770,17 +3540,16 @@ databases_delete_whitelist_ips <- function(id, whitelisted_ip_id) { } -#' Get the advanced settings for this database -#' @param id integer required. The ID of the database this advanced settings object belongs to. 
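A similarly hedged sketch of attaching a Civis Data Match enhancement to a project and detaching it again (both IDs are placeholders; each call returns an empty HTTP response):

enhancements_put_civis_data_match_projects(id = 123, project_id = 456)
enhancements_delete_civis_data_match_projects(id = 123, project_id = 456)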
+#' List available enhancement types #' -#' @return A list containing the following elements: -#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} +#' @return An array containing the following fields: +#' \item{name}{string, The name of the type.} #' @export -databases_list_advanced_settings <- function(id) { +enhancements_list_types <- function() { args <- as.list(match.call())[-1] - path <- "/databases/{id}/advanced-settings" - path_params <- list(id = id) + path <- "/enhancements/types" + path_params <- list() query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -2793,64 +3562,62 @@ databases_list_advanced_settings <- function(id) { } -#' Update the advanced settings for this database -#' @param id integer required. The ID of the database this advanced settings object belongs to. -#' @param export_caching_enabled boolean optional. Whether or not caching is enabled for export jobs run on this database server. -#' -#' @return A list containing the following elements: -#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} -#' @export -databases_patch_advanced_settings <- function(id, export_caching_enabled = NULL) { - - args <- as.list(match.call())[-1] - path <- "/databases/{id}/advanced-settings" - path_params <- list(id = id) - query_params <- list() - body_params <- list(exportCachingEnabled = export_caching_enabled) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Edit the advanced settings for this database -#' @param id integer required. The ID of the database this advanced settings object belongs to. -#' @param export_caching_enabled boolean required. Whether or not caching is enabled for export jobs run on this database server. +#' List the fields in a field mapping for Civis Data Match, Data Unification, and Table Deduplication jobs #' -#' @return A list containing the following elements: -#' \item{exportCachingEnabled}{boolean, Whether or not caching is enabled for export jobs run on this database server.} +#' @return An array containing the following fields: +#' \item{field}{string, The name of the field.} +#' \item{description}{string, The description of the field.} #' @export -databases_put_advanced_settings <- function(id, export_caching_enabled) { +enhancements_list_field_mapping <- function() { args <- as.list(match.call())[-1] - path <- "/databases/{id}/advanced-settings" - path_params <- list(id = id) + path <- "/enhancements/field-mapping" + path_params <- list() query_params <- list() - body_params <- list(exportCachingEnabled = export_caching_enabled) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List API endpoints +#' List Enhancements +#' @param type string optional. If specified, return items of these types. +#' @param author string optional. If specified, return items from any of these authors. 
It accepts a comma-separated list of user IDs. +#' @param status string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. +#' @param archived string optional. The archival status of the requested item(s). +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the enhancement's last run} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -endpoints_list <- function() { +enhancements_list <- function(type = NULL, author = NULL, status = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/endpoints/" + path <- "/enhancements/" path_params <- list() - query_params <- list() + query_params <- list(type = type, author = author, status = status, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -2862,29 +3629,27 @@ endpoints_list <- function() { } -#' Create a Civis Data Match Enhancement +#' Create a CASS/NCOA Enhancement #' @param name string required. The name of the enhancement job. -#' @param input_field_mapping list required. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields. -#' @param input_table list required. A list containing the following elements: +#' @param source list required. A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' } -#' @param match_target_id integer required. The ID of the Civis Data match target. See /match_targets for IDs. -#' @param output_table list required. A list containing the following elements: +#' \item databaseTable list . A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. 
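As an illustrative sketch of the listing wrappers above (the `type` string is a guess; `enhancements_list_types()` returns the valid names):

types  <- enhancements_list_types()
fields <- enhancements_list_field_mapping()
recent <- enhancements_list(
  type      = "cass-ncoa",      # assumed type name; check enhancements_list_types()
  status    = "succeeded",
  limit     = 50,               # maximum allowed per the documentation above
  order     = "updated_at",
  order_dir = "desc"
)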
+#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' } #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param parent_id integer optional. Parent ID that triggers this enhancement. #' @param notifications list optional. A list containing the following elements: @@ -2900,9 +3665,30 @@ endpoints_list <- function() { #' \item successOn boolean, If success email notifications are on. #' \item failureOn boolean, If failure email notifications are on. #' } -#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. -#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. -#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. +#' @param destination list optional. A list containing the following elements: +#' \itemize{ +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } +#' } +#' @param column_mapping list optional. A list containing the following elements: +#' \itemize{ +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. +#' } +#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. +#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. +#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. +#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. 
+#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param chunk_size integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -2922,10 +3708,11 @@ endpoints_list <- function() { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -2949,40 +3736,51 @@ endpoints_list <- function() { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } #' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). 
Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{columnMapping}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. #' }} +#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} +#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} +#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} +#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. 
Note that this parameter is not available to all users.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_post_civis_data_match <- function(name, input_field_mapping, input_table, match_target_id, output_table, schedule = NULL, parent_id = NULL, notifications = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { +enhancements_post_cass_ncoa <- function(name, source, schedule = NULL, parent_id = NULL, notifications = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL, chunk_size = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/" + path <- "/enhancements/cass-ncoa" path_params <- list() query_params <- list() - body_params <- list(name = name, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, schedule = schedule, parentId = parent_id, notifications = notifications, maxMatches = max_matches, threshold = threshold, archived = archived) + body_params <- list(name = name, source = source, schedule = schedule, parentId = parent_id, notifications = notifications, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql, chunkSize = chunk_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -2993,7 +3791,7 @@ enhancements_post_civis_data_match <- function(name, input_field_mapping, input_ } -#' Get a Civis Data Match Enhancement +#' Get a CASS/NCOA Enhancement #' @param id integer required. #' #' @return A list containing the following elements: @@ -3014,10 +3812,11 @@ enhancements_post_civis_data_match <- function(name, input_field_mapping, input_ #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -3041,37 +3840,48 @@ enhancements_post_civis_data_match <- function(name, input_field_mapping, input_ #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: +#' \item databaseTable list . A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } +#' }} +#' \item{columnMapping}{list, A list containing the following elements: +#' \itemize{ +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. #' }} +#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} +#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} +#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} +#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. 
Note that this parameter is not available to all users.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_get_civis_data_match <- function(id) { +enhancements_get_cass_ncoa <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}" + path <- "/enhancements/cass-ncoa/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -3085,30 +3895,28 @@ enhancements_get_civis_data_match <- function(id) { } -#' Replace all attributes of this Civis Data Match Enhancement +#' Replace all attributes of this CASS/NCOA Enhancement #' @param id integer required. The ID for the enhancement. #' @param name string required. The name of the enhancement job. -#' @param input_field_mapping list required. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields. -#' @param input_table list required. A list containing the following elements: +#' @param source list required. A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' } -#' @param match_target_id integer required. The ID of the Civis Data match target. See /match_targets for IDs. -#' @param output_table list required. A list containing the following elements: +#' \item databaseTable list . A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' } #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param parent_id integer optional. Parent ID that triggers this enhancement. #' @param notifications list optional. A list containing the following elements: @@ -3124,9 +3932,30 @@ enhancements_get_civis_data_match <- function(id) { #' \item successOn boolean, If success email notifications are on. #' \item failureOn boolean, If failure email notifications are on. #' } -#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. -#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. 
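To make the shape of these calls concrete, a hedged sketch of creating a CASS/NCOA enhancement with `enhancements_post_cass_ncoa()` and fetching it back; schema and table names, host and credential IDs are placeholders, and only fields documented above are used:

job <- enhancements_post_cass_ncoa(
  name   = "Monthly address standardization",
  source = list(
    databaseTable = list(
      schema       = "staging",
      table        = "contacts",
      remoteHostId = 17,          # placeholder database host ID
      credentialId = 42           # placeholder credential ID
    )
  ),
  destination = list(
    databaseTable = list(schema = "public", table = "contacts_cass")
  ),
  perform_ncoa       = TRUE,
  ncoa_credential_id = 42,        # required when perform_ncoa is TRUE
  output_level       = "coalesced",
  schedule = list(
    scheduled        = TRUE,
    scheduledDays    = list(1),   # 0 = Sunday, so 1 = Monday
    scheduledHours   = list(6),
    scheduledMinutes = list(0)
  )
)
same_job <- enhancements_get_cass_ncoa(id = job$id)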
-#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. +#' @param destination list optional. A list containing the following elements: +#' \itemize{ +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } +#' } +#' @param column_mapping list optional. A list containing the following elements: +#' \itemize{ +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. +#' } +#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. +#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. +#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. +#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. +#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param chunk_size integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -3146,10 +3975,11 @@ enhancements_get_civis_data_match <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -3173,40 +4003,51 @@ enhancements_get_civis_data_match <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. 
See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } #' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{columnMapping}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. #' }} +#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} +#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} +#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. 
Required if 'performNcoa' is true.} +#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_civis_data_match <- function(id, name, input_field_mapping, input_table, match_target_id, output_table, schedule = NULL, parent_id = NULL, notifications = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { +enhancements_put_cass_ncoa <- function(id, name, source, schedule = NULL, parent_id = NULL, notifications = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL, chunk_size = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}" + path <- "/enhancements/cass-ncoa/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, schedule = schedule, parentId = parent_id, notifications = notifications, maxMatches = max_matches, threshold = threshold, archived = archived) + body_params <- list(name = name, source = source, schedule = schedule, parentId = parent_id, notifications = notifications, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql, chunkSize = chunk_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -3217,16 +4058,17 @@ enhancements_put_civis_data_match <- function(id, name, input_field_mapping, inp } -#' Update some attributes of this Civis Data Match Enhancement +#' Update some attributes of this CASS/NCOA Enhancement #' @param id integer required. The ID for the enhancement. #' @param name string optional. The name of the enhancement job. #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param parent_id integer optional. Parent ID that triggers this enhancement. #' @param notifications list optional. 
A list containing the following elements: @@ -3242,118 +4084,41 @@ enhancements_put_civis_data_match <- function(id, name, input_field_mapping, inp #' \item successOn boolean, If success email notifications are on. #' \item failureOn boolean, If failure email notifications are on. #' } -#' @param input_field_mapping list optional. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields. -#' @param input_table list optional. A list containing the following elements: +#' @param source list optional. A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' } -#' @param match_target_id integer optional. The ID of the Civis Data match target. See /match_targets for IDs. -#' @param output_table list optional. A list containing the following elements: +#' \item databaseTable list . A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. #' } -#' @param max_matches integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches. -#' @param threshold number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. -#' @param archived boolean optional. Whether the Civis Data Match Job has been archived. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. 
-#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: +#' } +#' @param destination list optional. A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: +#' \item databaseTable list . A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } +#' } +#' @param column_mapping list optional. A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. 
-#' }} -#' @export -enhancements_patch_civis_data_match <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, input_field_mapping = NULL, input_table = NULL, match_target_id = NULL, output_table = NULL, max_matches = NULL, threshold = NULL, archived = NULL) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, inputFieldMapping = input_field_mapping, inputTable = input_table, matchTargetId = match_target_id, outputTable = output_table, maxMatches = max_matches, threshold = threshold, archived = archived) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Clone this Civis Data Match Enhancement -#' @param id integer required. The ID for the enhancement. -#' @param clone_schedule boolean optional. If true, also copy the schedule to the new enhancement. -#' @param clone_triggers boolean optional. If true, also copy the triggers to the new enhancement. -#' @param clone_notifications boolean optional. If true, also copy the notifications to the new enhancement. +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. +#' } +#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. +#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. +#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. +#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. +#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param chunk_size integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -3373,10 +4138,11 @@ enhancements_patch_civis_data_match <- function(id, name = NULL, schedule = NULL #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. 
#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -3400,44 +4166,55 @@ enhancements_patch_civis_data_match <- function(id, name = NULL, schedule = NULL #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name of the source table. +#' \item table string, The name of the source table. +#' \item remoteHostId integer, The ID of the database host for the table. +#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. +#' \item multipartKey array, The source table primary key. +#' } #' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The schema name for the output data. +#' \item table string, The table name for the output data. +#' } #' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{columnMapping}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item address1 string, The first address line. +#' \item address2 string, The second address line. +#' \item city string, The city of an address. +#' \item state string, The state of an address. +#' \item zip string, The zip code of an address. +#' \item name string, The full name of the resident at this address. 
If needed, separate multiple columns with `+`, e.g. `first_name+last_name` +#' \item company string, The name of the company located at this address. #' }} +#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} +#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} +#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} +#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_post_civis_data_match_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { +enhancements_patch_cass_ncoa <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, source = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL, chunk_size = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/clone" + path <- "/enhancements/cass-ncoa/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(cloneSchedule = clone_schedule, cloneTriggers = clone_triggers, cloneNotifications = clone_notifications) + body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, source = source, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql, chunkSize = chunk_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) @@ -3445,21 +4222,22 @@ enhancements_post_civis_data_match_clone <- function(id, clone_schedule = NULL, #' Start a run -#' @param id integer required. The ID of the civis_data_match. +#' @param id integer required. The ID of the cass_ncoa. 
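Whereas `enhancements_put_cass_ncoa()` replaces all attributes of the job, the PATCH wrapper updates only the fields supplied. A minimal sketch (the ID and column names are placeholders):

enhancements_patch_cass_ncoa(
  id             = 123,
  name           = "Monthly address standardization (custom mapping)",
  column_mapping = list(
    address1 = "addr_line_1",
    city     = "city",
    state    = "state",
    zip      = "zip5",
    name     = "first_name+last_name"  # multiple columns joined with `+`
  ),
  use_default_column_mapping = FALSE   # a custom mapping requires this to be FALSE
)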
#' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the run.} -#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export -enhancements_post_civis_data_match_runs <- function(id) { +enhancements_post_cass_ncoa_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs" + path <- "/enhancements/cass-ncoa/{id}/runs" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -3473,8 +4251,8 @@ enhancements_post_civis_data_match_runs <- function(id) { } -#' List runs for the given civis_data_match -#' @param id integer required. The ID of the civis_data_match. +#' List runs for the given cass_ncoa +#' @param id integer required. The ID of the cass_ncoa. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. #' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. @@ -3482,17 +4260,18 @@ enhancements_post_civis_data_match_runs <- function(id) { #' #' @return An array containing the following fields: #' \item{id}{integer, The ID of the run.} -#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export -enhancements_list_civis_data_match_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +enhancements_list_cass_ncoa_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs" + path <- "/enhancements/cass-ncoa/{id}/runs" path_params <- list(id = id) query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() @@ -3507,22 +4286,23 @@ enhancements_list_civis_data_match_runs <- function(id, limit = NULL, page_num = #' Check status of a run -#' @param id integer required. The ID of the civis_data_match. +#' @param id integer required. The ID of the cass_ncoa. #' @param run_id integer required. The ID of the run. 
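A short sketch of the run lifecycle for a CASS/NCOA enhancement, using the run wrappers defined here (IDs are placeholders): start a run, list recent runs, then check one run's state:

run   <- enhancements_post_cass_ncoa_runs(id = 123)
runs  <- enhancements_list_cass_ncoa_runs(id = 123, limit = 5, order_dir = "desc")
state <- enhancements_get_cass_ncoa_runs(id = 123, run_id = run$id)$state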
#' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the run.} -#' \item{civisDataMatchId}{integer, The ID of the civis_data_match.} +#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export -enhancements_get_civis_data_match_runs <- function(id, run_id) { +enhancements_get_cass_ncoa_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs/{run_id}" + path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}" path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() @@ -3537,15 +4317,15 @@ enhancements_get_civis_data_match_runs <- function(id, run_id) { #' Cancel a run -#' @param id integer required. The ID of the civis_data_match. +#' @param id integer required. The ID of the cass_ncoa. #' @param run_id integer required. The ID of the run. #' #' @return An empty HTTP response #' @export -enhancements_delete_civis_data_match_runs <- function(id, run_id) { +enhancements_delete_cass_ncoa_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs/{run_id}" + path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}" path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() @@ -3560,7 +4340,7 @@ enhancements_delete_civis_data_match_runs <- function(id, run_id) { #' Get the logs for a run -#' @param id integer required. The ID of the civis_data_match. +#' @param id integer required. The ID of the cass_ncoa. #' @param run_id integer required. The ID of the run. #' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. #' @param limit integer optional. The maximum number of log messages to return. Default of 10000. @@ -3571,10 +4351,10 @@ enhancements_delete_civis_data_match_runs <- function(id, run_id) { #' \item{message}{string, The log message.} #' \item{level}{string, The level of the log. 
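# A usage sketch with hypothetical IDs (job 123, run 456): check a run's state
# and, if it is still pending, cancel it via the DELETE endpoint above.
status <- enhancements_get_cass_ncoa_runs(id = 123, run_id = 456)
if (status$state %in% c("queued", "running")) {
  enhancements_delete_cass_ncoa_runs(id = 123, run_id = 456)
}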
One of unknown,fatal,error,warn,info,debug.} #' @export -enhancements_list_civis_data_match_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +enhancements_list_cass_ncoa_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs/{run_id}/logs" + path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}/logs" path_params <- list(id = id, run_id = run_id) query_params <- list(last_id = last_id, limit = limit) body_params <- list() @@ -3596,10 +4376,10 @@ enhancements_list_civis_data_match_runs_logs <- function(id, run_id, last_id = N #' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} #' @export -enhancements_post_civis_data_match_cancel <- function(id) { +enhancements_post_cass_ncoa_cancel <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/cancel" + path <- "/enhancements/cass-ncoa/{id}/cancel" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -3628,10 +4408,10 @@ enhancements_post_civis_data_match_cancel <- function(id) { #' \item{link}{string, The hypermedia link to the output.} #' \item{value}{string, } #' @export -enhancements_list_civis_data_match_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +enhancements_list_cass_ncoa_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/runs/{run_id}/outputs" + path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}/outputs" path_params <- list(id = id, run_id = run_id) query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() @@ -3645,144 +4425,300 @@ enhancements_list_civis_data_match_runs_outputs <- function(id, run_id, limit = } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Create a Geocode Enhancement +#' @param name string required. The name of the enhancement job. +#' @param remote_host_id integer required. The ID of the remote host. +#' @param credential_id integer required. The ID of the remote host credential. +#' @param source_schema_and_table string required. The source database schema and table. +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. 
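# A usage sketch with hypothetical IDs (job 123, run 456): fetch recent log
# messages and the run's outputs; the commented line would cancel the job's
# active run.
logs <- enhancements_list_cass_ncoa_runs_logs(id = 123, run_id = 456, limit = 100)
outputs <- enhancements_list_cass_ncoa_runs_outputs(id = 123, run_id = 456)
# enhancements_post_cass_ncoa_cancel(id = 123)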
+#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param multipart_key array optional. The source table primary key. +#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param target_schema string optional. The output table schema. +#' @param target_table string optional. The output table name. +#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. +#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. +#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. 
+#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{remoteHostId}{integer, The ID of the remote host.} +#' \item{credentialId}{integer, The ID of the remote host credential.} +#' \item{sourceSchemaAndTable}{string, The source database schema and table.} +#' \item{multipartKey}{array, The source table primary key.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{targetSchema}{string, The output table schema.} +#' \item{targetTable}{string, The output table name.} +#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} +#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} +#' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_list_civis_data_match_shares <- function(id) { +enhancements_post_geocode <- function(name, remote_host_id, credential_id, source_schema_and_table, schedule = NULL, parent_id = NULL, notifications = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/shares" - path_params <- list(id = id) + path <- "/enhancements/geocode" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(name = name, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, schedule = schedule, parentId = parent_id, notifications = notifications, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get a Geocode Enhancement +#' @param id integer required. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. 
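# A usage sketch for creating a Geocode enhancement with the POST endpoint
# above; every ID, schema, and table name below is hypothetical and must exist
# in your Civis Platform account for the call to succeed.
geo <- enhancements_post_geocode(
  name = "Geocode donor addresses",
  remote_host_id = 11,
  credential_id = 22,
  source_schema_and_table = "staging.donor_addresses",
  country = "us",
  provider = "postgis",
  target_schema = "analytics",
  target_table = "donor_addresses_geocoded"
)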
+#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{remoteHostId}{integer, The ID of the remote host.} +#' \item{credentialId}{integer, The ID of the remote host credential.} +#' \item{sourceSchemaAndTable}{string, The source database schema and table.} +#' \item{multipartKey}{array, The source table primary key.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{targetSchema}{string, The output table schema.} +#' \item{targetTable}{string, The output table name.} +#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} +#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} +#' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_civis_data_match_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +enhancements_get_geocode <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/shares/users" + path <- "/enhancements/geocode/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. -#' -#' @return An empty HTTP response -#' @export -enhancements_delete_civis_data_match_shares_users <- function(id, user_id) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) - query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Replace all attributes of this Geocode Enhancement +#' @param id integer required. The ID for the enhancement. +#' @param name string required. The name of the enhancement job. +#' @param remote_host_id integer required. The ID of the remote host. +#' @param credential_id integer required. The ID of the remote host credential. +#' @param source_schema_and_table string required. The source database schema and table. +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. 
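# A usage sketch with a hypothetical enhancement ID: fetch the current Geocode
# configuration and inspect where results will be written.
geo <- enhancements_get_geocode(id = 789)
geo$targetSchema
geo$targetTable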
A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param multipart_key array optional. The source table primary key. +#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param target_schema string optional. The output table schema. +#' @param target_table string optional. The output table name. +#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. +#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. +#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID for the enhancement.} +#' \item{name}{string, The name of the enhancement job.} +#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} +#' \item{createdAt}{string, The time this enhancement was created.} +#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{state}{string, The status of the enhancement's last run} +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{parentId}{integer, Parent ID that triggers this enhancement.} +#' \item{notifications}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{remoteHostId}{integer, The ID of the remote host.} +#' \item{credentialId}{integer, The ID of the remote host credential.} +#' \item{sourceSchemaAndTable}{string, The source database schema and table.} +#' \item{multipartKey}{array, The source table primary key.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{targetSchema}{string, The output table schema.} +#' \item{targetTable}{string, The output table name.} +#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} +#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} +#' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_civis_data_match_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +enhancements_put_geocode <- function(id, name, remote_host_id, credential_id, source_schema_and_table, schedule = NULL, parent_id = NULL, notifications = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/shares/groups" + path <- "/enhancements/geocode/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(name = name, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, schedule = schedule, parentId = parent_id, notifications = notifications, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -3793,32 +4729,42 @@ enhancements_put_civis_data_match_shares_groups <- function(id, group_ids, permi } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. -#' -#' @return An empty HTTP response -#' @export -enhancements_delete_civis_data_match_shares_groups <- function(id, group_id) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Update some attributes of this Geocode Enhancement +#' @param id integer required. The ID for the enhancement. +#' @param name string optional. The name of the enhancement job. +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param parent_id integer optional. Parent ID that triggers this enhancement. +#' @param notifications list optional. 
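# A usage sketch for the PUT endpoint above. PUT replaces every attribute, so
# the required fields must all be resupplied; the IDs and names are
# hypothetical.
enhancements_put_geocode(
  id = 789,
  name = "Geocode donor addresses",
  remote_host_id = 11,
  credential_id = 22,
  source_schema_and_table = "staging.donor_addresses",
  provider = "nominatim"
)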
A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param remote_host_id integer optional. The ID of the remote host. +#' @param credential_id integer optional. The ID of the remote host credential. +#' @param source_schema_and_table string optional. The source database schema and table. +#' @param multipart_key array optional. The source table primary key. +#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' @param target_schema string optional. The output table schema. +#' @param target_table string optional. The output table name. +#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. +#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. +#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -3838,10 +4784,11 @@ enhancements_delete_civis_data_match_shares_groups <- function(id, group_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -3865,85 +4812,89 @@ enhancements_delete_civis_data_match_shares_groups <- function(id, group_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} -#' \item{inputTable}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseName string, The Redshift database name for the table. 
-#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' }} -#' \item{matchTargetId}{integer, The ID of the Civis Data match target. See /match_targets for IDs.} -#' \item{outputTable}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseName string, The Redshift database name for the table. -#' \item schema string, The schema name for the table. -#' \item table string, The table name. -#' }} -#' \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -#' \item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} -#' \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{remoteHostId}{integer, The ID of the remote host.} +#' \item{credentialId}{integer, The ID of the remote host credential.} +#' \item{sourceSchemaAndTable}{string, The source database schema and table.} +#' \item{multipartKey}{array, The source table primary key.} +#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{targetSchema}{string, The output table schema.} +#' \item{targetTable}{string, The output table name.} +#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} +#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} +#' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_civis_data_match_archive <- function(id, status) { +enhancements_patch_geocode <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, remote_host_id = NULL, credential_id = NULL, source_schema_and_table = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/archive" + path <- "/enhancements/geocode/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' List the projects a Civis Data Match Enhancement belongs to -#' @param id integer required. The ID of the Civis Data Match Enhancement. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Start a run +#' @param id integer required. The ID of the geocode. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{geocodeId}{integer, The ID of the geocode.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} +#' @export +enhancements_post_geocode_runs <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/geocode/{id}/runs" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List runs for the given geocode +#' @param id integer required. The ID of the geocode. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
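# A usage sketch with hypothetical values: PATCH only the fields that change,
# then start a run of the updated Geocode enhancement.
enhancements_patch_geocode(id = 789, limiting_sql = "state='IL'")
run <- enhancements_post_geocode_runs(id = 789)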
#' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{id}{integer, The ID of the run.} +#' \item{geocodeId}{integer, The ID of the geocode.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -enhancements_list_civis_data_match_projects <- function(id, hidden = NULL) { +enhancements_list_geocode_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/projects" + path <- "/enhancements/geocode/{id}/runs" path_params <- list(id = id) - query_params <- list(hidden = hidden) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -3955,40 +4906,48 @@ enhancements_list_civis_data_match_projects <- function(id, hidden = NULL) { } -#' Add a Civis Data Match Enhancement to a project -#' @param id integer required. The ID of the Civis Data Match Enhancement. -#' @param project_id integer required. The ID of the project. +#' Check status of a run +#' @param id integer required. The ID of the geocode. +#' @param run_id integer required. The ID of the run. 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{geocodeId}{integer, The ID of the geocode.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -enhancements_put_civis_data_match_projects <- function(id, project_id) { +enhancements_get_geocode_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/enhancements/geocode/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Remove a Civis Data Match Enhancement from a project -#' @param id integer required. The ID of the Civis Data Match Enhancement. -#' @param project_id integer required. The ID of the project. +#' Cancel a run +#' @param id integer required. The ID of the geocode. +#' @param run_id integer required. The ID of the run. #' #' @return An empty HTTP response #' @export -enhancements_delete_civis_data_match_projects <- function(id, project_id) { +enhancements_delete_geocode_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/civis-data-match/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/enhancements/geocode/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -4001,17 +4960,24 @@ enhancements_delete_civis_data_match_projects <- function(id, project_id) { } -#' List available enhancement types +#' Get the logs for a run +#' @param id integer required. The ID of the geocode. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. #' #' @return An array containing the following fields: -#' \item{name}{string, The name of the type.} +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
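# A usage sketch with hypothetical IDs (job 789, run 1011): list recent runs,
# check one run's state, and cancel it via the DELETE endpoint if it has not
# finished.
runs <- enhancements_list_geocode_runs(id = 789, limit = 20)
status <- enhancements_get_geocode_runs(id = 789, run_id = 1011)
if (status$state %in% c("queued", "running")) {
  enhancements_delete_geocode_runs(id = 789, run_id = 1011)
}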
One of unknown,fatal,error,warn,info,debug.} #' @export -enhancements_list_types <- function() { +enhancements_list_geocode_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/types" - path_params <- list() - query_params <- list() + path <- "/enhancements/geocode/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -4023,62 +4989,52 @@ enhancements_list_types <- function() { } -#' List the fields in a field mapping for Civis Data Match, Data Unification, and Table Deduplication jobs +#' Cancel a run +#' @param id integer required. The ID of the job. #' -#' @return An array containing the following fields: -#' \item{field}{string, The name of the field.} -#' \item{description}{string, The description of the field.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} #' @export -enhancements_list_field_mapping <- function() { +enhancements_post_geocode_cancel <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/field-mapping" - path_params <- list() + path <- "/enhancements/geocode/{id}/cancel" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List Enhancements -#' @param type string optional. If specified, return items of these types. -#' @param author string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors. -#' @param status string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' List the outputs for a run +#' @param id integer required. The ID of the job. +#' @param run_id integer required. The ID of the run. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
#' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{objectType}{string, The type of the output. Valid values are File, Table, Report, Project, Credential, or JSONValue} +#' \item{objectId}{integer, The ID of the output.} +#' \item{name}{string, The name of the output.} +#' \item{link}{string, The hypermedia link to the output.} +#' \item{value}{string, } #' @export -enhancements_list <- function(type = NULL, author = NULL, status = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +enhancements_list_geocode_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/" - path_params <- list() - query_params <- list(type = type, author = author, status = status, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/enhancements/geocode/{id}/runs/{run_id}/outputs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -4090,172 +5046,247 @@ enhancements_list <- function(type = NULL, author = NULL, status = NULL, archive } -#' Create a CASS/NCOA Enhancement -#' @param name string required. The name of the enhancement job. -#' @param source list required. A list containing the following elements: +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. +#' +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item databaseTable list . A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. -#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' } -#' @param schedule list optional. A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
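# A usage sketch with hypothetical IDs (job 789, run 1011): page through a
# geocode run's logs and list its outputs; the commented line would cancel the
# job's active run.
logs <- enhancements_list_geocode_runs_logs(id = 789, run_id = 1011, limit = 500)
outputs <- enhancements_list_geocode_runs_outputs(id = 789, run_id = 1011)
# enhancements_post_geocode_cancel(id = 789)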
-#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +enhancements_list_cass_ncoa_shares <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/shares" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param destination list optional. A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' } -#' @param column_mapping list optional. A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. 
-#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' } -#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. -#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. -#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. -#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. 
+#' \item users array,
+#' \item groups array,
 #' }}
-#' \item{runningAs}{list, A list containing the following elements:
+#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.}
+#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.}
+#' @export
+enhancements_put_cass_ncoa_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) {
+
+  args <- as.list(match.call())[-1]
+  path <- "/enhancements/cass-ncoa/{id}/shares/users"
+  path_params <- list(id = id)
+  query_params <- list()
+  body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email)
+  path_params <- path_params[match_params(path_params, args)]
+  query_params <- query_params[match_params(query_params, args)]
+  body_params <- body_params[match_params(body_params, args)]
+  resp <- call_api("PUT", path, path_params, query_params, body_params)
+
+  return(resp)
+
+ }
+
+
+#' Revoke the permissions a user has on this object
+#' @param id integer required. The ID of the resource that is shared.
+#' @param user_id integer required. The ID of the user.
+#'
+#' @return An empty HTTP response
+#' @export
+enhancements_delete_cass_ncoa_shares_users <- function(id, user_id) {
+
+  args <- as.list(match.call())[-1]
+  path <- "/enhancements/cass-ncoa/{id}/shares/users/{user_id}"
+  path_params <- list(id = id, user_id = user_id)
+  query_params <- list()
+  body_params <- list()
+  path_params <- path_params[match_params(path_params, args)]
+  query_params <- query_params[match_params(query_params, args)]
+  body_params <- body_params[match_params(body_params, args)]
+  resp <- call_api("DELETE", path, path_params, query_params, body_params)
+
+  return(resp)
+
+ }
+
+
+#' Set the permissions groups have on this object
+#' @param id integer required. The ID of the resource that is shared.
+#' @param group_ids array required. An array of one or more group IDs.
+#' @param permission_level string required. Options are: "read", "write", or "manage".
+#' @param share_email_body string optional. Custom body text for e-mail sent on a share.
+#' @param send_shared_email boolean optional. Send email to the recipients of a share.
+#'
+#' @return A list containing the following elements:
+#' \item{readers}{list, A list containing the following elements:
 #' \itemize{
-#' \item id integer, The ID of this user.
-#' \item name string, This user's name.
-#' \item username string, This user's username.
-#' \item initials string, This user's initials.
-#' \item online boolean, Whether this user is online.
+#' \item users array,
+#' \item groups array,
 #' }}
-#' \item{source}{list, A list containing the following elements:
-#' \itemize{
-#' \item databaseTable list . A list containing the following elements:
+#' \item{writers}{list, A list containing the following elements:
 #' \itemize{
-#' \item schema string, The schema name of the source table.
-#' \item table string, The name of the source table.
-#' \item remoteHostId integer, The ID of the database host for the table.
-#' \item credentialId integer, The id of the credentials to be used when performing the enhancement.
-#' \item multipartKey array, The source table primary key. 
-#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{columnMapping}{list, A list containing the following elements: +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +enhancements_put_cass_ncoa_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/shares/groups" + path_params <- list(id = id) + query_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. +#' +#' @return An empty HTTP response +#' @export +enhancements_delete_cass_ncoa_shares_groups <- function(id, group_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. 
ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +enhancements_list_cass_ncoa_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: #' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user #' }} -#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} -#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} -#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} -#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. 
state='IL').} -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_post_cass_ncoa <- function(name, source, schedule = NULL, parent_id = NULL, notifications = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL) { +enhancements_put_cass_ncoa_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa" - path_params <- list() + path <- "/enhancements/cass-ncoa/{id}/transfer" + path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, schedule = schedule, parentId = parent_id, notifications = notifications, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql) + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get a CASS/NCOA Enhancement -#' @param id integer required. +#' List the projects a CASS/NCOA Enhancement belongs to +#' @param id integer required. The ID of the CASS/NCOA Enhancement. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -4264,30 +5295,9 @@ enhancements_post_cass_ncoa <- function(name, source, schedule = NULL, parent_id #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. 
-#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -4295,48 +5305,17 @@ enhancements_post_cass_ncoa <- function(name, source, schedule = NULL, parent_id #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. -#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' }} -#' \item{columnMapping}{list, A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' }} -#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} -#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} -#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} -#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. 
state='IL').} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_get_cass_ncoa <- function(id) { +enhancements_list_cass_ncoa_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}" + path <- "/enhancements/cass-ncoa/{id}/projects" path_params <- list(id = id) - query_params <- list() + query_params <- list(hidden = hidden) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -4348,65 +5327,55 @@ enhancements_get_cass_ncoa <- function(id) { } -#' Replace all attributes of this CASS/NCOA Enhancement -#' @param id integer required. The ID for the enhancement. -#' @param name string required. The name of the enhancement job. -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. -#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' } -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param destination list optional. A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' } -#' @param column_mapping list optional. A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. 
-#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' } -#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. -#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. -#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. -#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). +#' Add a CASS/NCOA Enhancement to a project +#' @param id integer required. The ID of the CASS/NCOA Enhancement. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +enhancements_put_cass_ncoa_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Remove a CASS/NCOA Enhancement from a project +#' @param id integer required. The ID of the CASS/NCOA Enhancement. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +enhancements_delete_cass_ncoa_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/cass-ncoa/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -4426,10 +5395,11 @@ enhancements_get_cass_ncoa <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -4453,6 +5423,7 @@ enhancements_get_cass_ncoa <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{source}{list, A list containing the following elements: #' \itemize{ #' \item databaseTable list . A list containing the following elements: @@ -4487,15 +5458,16 @@ enhancements_get_cass_ncoa <- function(id) { #' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} #' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} #' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} #' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_cass_ncoa <- function(id, name, source, schedule = NULL, parent_id = NULL, notifications = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL) { +enhancements_put_cass_ncoa_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}" + path <- "/enhancements/cass-ncoa/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, schedule = schedule, parentId = parent_id, notifications = notifications, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql) + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -4506,267 +5478,165 @@ enhancements_put_cass_ncoa <- function(id, name, source, schedule = NULL, parent } -#' Update some attributes of this CASS/NCOA Enhancement -#' @param id integer required. The ID for the enhancement. -#' @param name string optional. The name of the enhancement job. -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. 
-#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param source list optional. A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. -#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' } -#' @param destination list optional. A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' } -#' @param column_mapping list optional. A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' } -#' @param use_default_column_mapping boolean optional. Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided. -#' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. -#' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. -#' @param output_level string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). 
+#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item users array, +#' \item groups array, #' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. +#' \item users array, +#' \item groups array, #' }} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. 
-#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' }} -#' \item{columnMapping}{list, A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' }} -#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} -#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} -#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} -#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -enhancements_patch_cass_ncoa <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, source = NULL, destination = NULL, column_mapping = NULL, use_default_column_mapping = NULL, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, limiting_sql = NULL) { +enhancements_list_geocode_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}" + path <- "/enhancements/geocode/{id}/shares" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, source = source, destination = destination, columnMapping = column_mapping, useDefaultColumnMapping = use_default_column_mapping, performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, limitingSQL = limiting_sql) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Start a run -#' @param id integer required. The ID of the cass_ncoa. 
+#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -enhancements_post_cass_ncoa_runs <- function(id) { +enhancements_put_geocode_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/runs" + path <- "/enhancements/geocode/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List runs for the given cass_ncoa -#' @param id integer required. The ID of the cass_ncoa. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. 
 #'
-#' @return An array containing the following fields:
-#' \item{id}{integer, The ID of the run.}
-#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.}
-#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.}
-#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.}
-#' \item{startedAt}{string, The time the last run started at.}
-#' \item{finishedAt}{string, The time the last run completed.}
-#' \item{error}{string, The error, if any, returned by the run.}
+#' @return An empty HTTP response
 #' @export
-enhancements_list_cass_ncoa_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) {
+enhancements_delete_geocode_shares_users <- function(id, user_id) {
 
   args <- as.list(match.call())[-1]
-  path <- "/enhancements/cass-ncoa/{id}/runs"
-  path_params <- list(id = id)
-  query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir)
+  path <- "/enhancements/geocode/{id}/shares/users/{user_id}"
+  path_params <- list(id = id, user_id = user_id)
+  query_params <- list()
   body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
   body_params <- body_params[match_params(body_params, args)]
-  resp <- call_api("GET", path, path_params, query_params, body_params)
+  resp <- call_api("DELETE", path, path_params, query_params, body_params)
 
   return(resp)
 
  }
 
 
-#' Check status of a run
-#' @param id integer required. The ID of the cass_ncoa.
-#' @param run_id integer required. The ID of the run.
+#' Set the permissions groups have on this object
+#' @param id integer required. The ID of the resource that is shared.
+#' @param group_ids array required. An array of one or more group IDs.
+#' @param permission_level string required. Options are: "read", "write", or "manage".
+#' @param share_email_body string optional. Custom body text for e-mail sent on a share.
+#' @param send_shared_email boolean optional. Send email to the recipients of a share.
 #'
 #' @return A list containing the following elements:
-#' \item{id}{integer, The ID of the run.}
-#' \item{cassNcoaId}{integer, The ID of the cass_ncoa.}
-#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.}
-#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.}
-#' \item{startedAt}{string, The time the last run started at.}
-#' \item{finishedAt}{string, The time the last run completed.}
-#' \item{error}{string, The error, if any, returned by the run.}
+#' \item{readers}{list, A list containing the following elements:
+#' \itemize{
+#' \item users array,
+#' \item groups array,
+#' }}
+#' \item{writers}{list, A list containing the following elements:
+#' \itemize{
+#' \item users array,
+#' \item groups array,
+#' }}
+#' \item{owners}{list, A list containing the following elements:
+#' \itemize{
+#' \item users array,
+#' \item groups array,
+#' }}
+#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.}
+#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -enhancements_get_cass_ncoa_runs <- function(id, run_id) { +enhancements_put_geocode_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/enhancements/geocode/{id}/shares/groups" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Cancel a run -#' @param id integer required. The ID of the cass_ncoa. -#' @param run_id integer required. The ID of the run. +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. #' #' @return An empty HTTP response #' @export -enhancements_delete_cass_ncoa_runs <- function(id, run_id) { +enhancements_delete_geocode_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/enhancements/geocode/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -4779,24 +5649,24 @@ enhancements_delete_cass_ncoa_runs <- function(id, run_id) { } -#' Get the logs for a run -#' @param id integer required. The ID of the cass_ncoa. -#' @param run_id integer required. The ID of the run. -#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} -#' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -enhancements_list_cass_ncoa_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +enhancements_list_geocode_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}/logs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(last_id = last_id, limit = limit) + path <- "/enhancements/geocode/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -4808,104 +5678,47 @@ enhancements_list_cass_ncoa_runs_logs <- function(id, run_id, last_id = NULL, li } -#' Cancel a run -#' @param id integer required. The ID of the job. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -enhancements_post_cass_ncoa_cancel <- function(id) { +enhancements_put_geocode_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/cancel" + path <- "/enhancements/geocode/{id}/transfer" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List the outputs for a run -#' @param id integer required. The ID of the job. -#' @param run_id integer required. The ID of the run. -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' List the projects a Geocode Enhancement belongs to +#' @param id integer required. The ID of the Geocode Enhancement. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' #' @return An array containing the following fields: -#' \item{objectType}{string, The type of the output. 
Valid values are File, Table, Report, Project, Credential, or JSONValue} -#' \item{objectId}{integer, The ID of the output.} -#' \item{name}{string, The name of the output.} -#' \item{link}{string, The hypermedia link to the output.} -#' \item{value}{string, } -#' @export -enhancements_list_cass_ncoa_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/runs/{run_id}/outputs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Create a Geocode Enhancement -#' @param name string required. The name of the enhancement job. -#' @param remote_host_id integer required. The ID of the remote host. -#' @param credential_id integer required. The ID of the remote host credential. -#' @param source_schema_and_table string required. The source database schema and table. -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param multipart_key array optional. The source table primary key. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). -#' @param target_schema string optional. The output table schema. -#' @param target_table string optional. The output table name. -#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. -#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. -#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. 
-#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{id}{integer, The ID for this project.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -4914,30 +5727,9 @@ enhancements_list_cass_ncoa_runs_outputs <- function(id, run_id, limit = NULL, p #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -4945,37 +5737,77 @@ enhancements_list_cass_ncoa_runs_outputs <- function(id, run_id, limit = NULL, p #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{remoteHostId}{integer, The ID of the remote host.} -#' \item{credentialId}{integer, The ID of the remote host credential.} -#' \item{sourceSchemaAndTable}{string, The source database schema and table.} -#' \item{multipartKey}{array, The source table primary key.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. 
state='IL').} -#' \item{targetSchema}{string, The output table schema.} -#' \item{targetTable}{string, The output table name.} -#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} -#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} -#' \item{outputAddress}{boolean, Whether to output the parsed address. Only guaranteed for the 'postgis' provider.} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_post_geocode <- function(name, remote_host_id, credential_id, source_schema_and_table, schedule = NULL, parent_id = NULL, notifications = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { +enhancements_list_geocode_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode" - path_params <- list() + path <- "/enhancements/geocode/{id}/projects" + path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add a Geocode Enhancement to a project +#' @param id integer required. The ID of the Geocode Enhancement. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +enhancements_put_geocode_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/geocode/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() - body_params <- list(name = name, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, schedule = schedule, parentId = parent_id, notifications = notifications, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get a Geocode Enhancement -#' @param id integer required. +#' Remove a Geocode Enhancement from a project +#' @param id integer required. The ID of the Geocode Enhancement. +#' @param project_id integer required. The ID of the project. 
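As context for the new geocode project endpoints above (and the matching removal endpoint just below), a minimal usage sketch, assuming a configured Civis API key; the enhancement and project IDs are placeholders:

library(civis)

# Placeholder IDs for an existing Geocode Enhancement and project.
geocode_id <- 123L
project_id <- 456L

# Add the enhancement to the project, list its project memberships, then remove it.
enhancements_put_geocode_projects(geocode_id, project_id)
projects <- enhancements_list_geocode_projects(geocode_id)
# Assuming the response parses to a list of project records with a $name field:
vapply(projects, function(p) p$name, character(1))
enhancements_delete_geocode_projects(geocode_id, project_id)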
+#' +#' @return An empty HTTP response +#' @export +enhancements_delete_geocode_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/enhancements/geocode/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the enhancement.} @@ -4995,10 +5827,11 @@ enhancements_post_geocode <- function(name, remote_host_id, credential_id, sourc #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{parentId}{integer, Parent ID that triggers this enhancement.} #' \item{notifications}{list, A list containing the following elements: @@ -5022,6 +5855,7 @@ enhancements_post_geocode <- function(name, remote_host_id, credential_id, sourc #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{remoteHostId}{integer, The ID of the remote host.} #' \item{credentialId}{integer, The ID of the remote host credential.} #' \item{sourceSchemaAndTable}{string, The source database schema and table.} @@ -5034,97 +5868,51 @@ enhancements_post_geocode <- function(name, remote_host_id, credential_id, sourc #' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} #' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_get_geocode <- function(id) { +enhancements_put_geocode_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}" + path <- "/enhancements/geocode/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Replace all attributes of this Geocode Enhancement -#' @param id integer required. The ID for the enhancement. -#' @param name string required. The name of the enhancement job. -#' @param remote_host_id integer required. The ID of the remote host. -#' @param credential_id integer required. The ID of the remote host credential. -#' @param source_schema_and_table string required. The source database schema and table. -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param multipart_key array optional. The source table primary key. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). -#' @param target_schema string optional. The output table schema. -#' @param target_table string optional. The output table name. -#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. -#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. -#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. +#' List +#' @param type string optional. If specified, return exports of these types. 
It accepts a comma-separated list, possible values are 'database' and 'gdoc'. +#' @param status string optional. If specified, returns export with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this export.} +#' \item{name}{string, The name of this export.} +#' \item{type}{string, The type of export.} +#' \item{createdAt}{string, The creation time for this export.} +#' \item{updatedAt}{string, The last modification time for this export.} +#' \item{state}{string, } +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. 
-#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -5132,139 +5920,18 @@ enhancements_get_geocode <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{remoteHostId}{integer, The ID of the remote host.} -#' \item{credentialId}{integer, The ID of the remote host credential.} -#' \item{sourceSchemaAndTable}{string, The source database schema and table.} -#' \item{multipartKey}{array, The source table primary key.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} -#' \item{targetSchema}{string, The output table schema.} -#' \item{targetTable}{string, The output table name.} -#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} -#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} -#' \item{outputAddress}{boolean, Whether to output the parsed address. 
Only guaranteed for the 'postgis' provider.} -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_geocode <- function(id, name, remote_host_id, credential_id, source_schema_and_table, schedule = NULL, parent_id = NULL, notifications = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { +exports_list <- function(type = NULL, status = NULL, author = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, schedule = schedule, parentId = parent_id, notifications = notifications, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) + path <- "/exports/" + path_params <- list() + query_params <- list(type = type, status = status, author = author, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Update some attributes of this Geocode Enhancement -#' @param id integer required. The ID for the enhancement. -#' @param name string optional. The name of the enhancement job. -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param parent_id integer optional. Parent ID that triggers this enhancement. -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param remote_host_id integer optional. The ID of the remote host. -#' @param credential_id integer optional. 
The ID of the remote host credential. -#' @param source_schema_and_table string optional. The source database schema and table. -#' @param multipart_key array optional. The source table primary key. -#' @param limiting_sql string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL'). -#' @param target_schema string optional. The output table schema. -#' @param target_table string optional. The output table name. -#' @param country string optional. The country of the addresses to be geocoded; either 'us' or 'ca'. -#' @param provider string optional. The geocoding provider; one of postgis, nominatim, and geocoder_ca. -#' @param output_address boolean optional. Whether to output the parsed address. Only guaranteed for the 'postgis' provider. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} -#' \item{remoteHostId}{integer, The ID of the remote host.} -#' \item{credentialId}{integer, The ID of the remote host credential.} -#' \item{sourceSchemaAndTable}{string, The source database schema and table.} -#' \item{multipartKey}{array, The source table primary key.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} -#' \item{targetSchema}{string, The output table schema.} -#' \item{targetTable}{string, The output table name.} -#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} -#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} -#' \item{outputAddress}{boolean, Whether to output the parsed address. Only guaranteed for the 'postgis' provider.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' @export -enhancements_patch_geocode <- function(id, name = NULL, schedule = NULL, parent_id = NULL, notifications = NULL, remote_host_id = NULL, credential_id = NULL, source_schema_and_table = NULL, multipart_key = NULL, limiting_sql = NULL, target_schema = NULL, target_table = NULL, country = NULL, provider = NULL, output_address = NULL) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, schedule = schedule, parentId = parent_id, notifications = notifications, remoteHostId = remote_host_id, credentialId = credential_id, sourceSchemaAndTable = source_schema_and_table, multipartKey = multipart_key, limitingSQL = limiting_sql, targetSchema = target_schema, targetTable = target_table, country = country, provider = provider, outputAddress = output_address) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) @@ -5272,21 +5939,21 @@ enhancements_patch_geocode <- function(id, name = NULL, schedule = NULL, parent_ #' Start a run -#' @param id integer required. The ID of the geocode. +#' @param id integer required. The ID of the csv_export. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{geocodeId}{integer, The ID of the geocode.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' \item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} #' @export -enhancements_post_geocode_runs <- function(id) { +exports_post_files_csv_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs" + path <- "/exports/files/csv/{id}/runs" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -5300,26 +5967,25 @@ enhancements_post_geocode_runs <- function(id) { } -#' List runs for the given geocode -#' @param id integer required. The ID of the geocode. +#' List runs for the given csv_export +#' @param id integer required. The ID of the csv_export. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. #' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of the run.} -#' \item{geocodeId}{integer, The ID of the geocode.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} #' @export -enhancements_list_geocode_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +exports_list_files_csv_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs" + path <- "/exports/files/csv/{id}/runs" path_params <- list(id = id) query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() @@ -5334,22 +6000,22 @@ enhancements_list_geocode_runs <- function(id, limit = NULL, page_num = NULL, or #' Check status of a run -#' @param id integer required. The ID of the geocode. +#' @param id integer required. 
The ID of the csv_export. #' @param run_id integer required. The ID of the run. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{geocodeId}{integer, The ID of the geocode.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' \item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} #' @export -enhancements_get_geocode_runs <- function(id, run_id) { +exports_get_files_csv_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs/{run_id}" + path <- "/exports/files/csv/{id}/runs/{run_id}" path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() @@ -5364,15 +6030,15 @@ enhancements_get_geocode_runs <- function(id, run_id) { #' Cancel a run -#' @param id integer required. The ID of the geocode. +#' @param id integer required. The ID of the csv_export. #' @param run_id integer required. The ID of the run. #' #' @return An empty HTTP response #' @export -enhancements_delete_geocode_runs <- function(id, run_id) { +exports_delete_files_csv_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs/{run_id}" + path <- "/exports/files/csv/{id}/runs/{run_id}" path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() @@ -5387,7 +6053,7 @@ enhancements_delete_geocode_runs <- function(id, run_id) { #' Get the logs for a run -#' @param id integer required. The ID of the geocode. +#' @param id integer required. The ID of the csv_export. #' @param run_id integer required. The ID of the run. #' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. #' @param limit integer optional. The maximum number of log messages to return. Default of 10000. @@ -5398,10 +6064,10 @@ enhancements_delete_geocode_runs <- function(id, run_id) { #' \item{message}{string, The log message.} #' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} #' @export -enhancements_list_geocode_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +exports_list_files_csv_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs/{run_id}/logs" + path <- "/exports/files/csv/{id}/runs/{run_id}/logs" path_params <- list(id = id, run_id = run_id) query_params <- list(last_id = last_id, limit = limit) body_params <- list() @@ -5415,33 +6081,8 @@ enhancements_list_geocode_runs_logs <- function(id, run_id, last_id = NULL, limi } -#' Cancel a run -#' @param id integer required. The ID of the job. 
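A hypothetical run-lifecycle sketch for the csv_export run endpoints above; the export ID is a placeholder and the state values checked are illustrative:

library(civis)

export_id <- 789L                                         # placeholder csv_export ID

run <- exports_post_files_csv_runs(export_id)             # queue a run
status <- exports_get_files_csv_runs(export_id, run$id)   # check its state
status$state

# Fetch recent log messages for the run, and cancel it if it has not finished.
logs <- exports_list_files_csv_runs_logs(export_id, run$id, limit = 100)
if (status$state %in% c("queued", "running")) {           # illustrative state values
  exports_delete_files_csv_runs(export_id, run$id)
}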
-#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' @export -enhancements_post_geocode_cancel <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/cancel" - path_params <- list(id = id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) - - return(resp) - - } - - #' List the outputs for a run -#' @param id integer required. The ID of the job. +#' @param id integer required. The ID of the csv_export. #' @param run_id integer required. The ID of the run. #' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. @@ -5455,10 +6096,10 @@ enhancements_post_geocode_cancel <- function(id) { #' \item{link}{string, The hypermedia link to the output.} #' \item{value}{string, } #' @export -enhancements_list_geocode_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +exports_list_files_csv_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/runs/{run_id}/outputs" + path <- "/exports/files/csv/{id}/runs/{run_id}/outputs" path_params <- list(id = id, run_id = run_id) query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() @@ -5472,32 +6113,112 @@ enhancements_list_geocode_runs_outputs <- function(id, run_id, limit = NULL, pag } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Create a CSV Export +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. 
If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } +#' } +#' @param name string optional. The name of this Csv Export job. +#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. +#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". +#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param hidden boolean optional. A boolean value indicating whether or not this request should be hidden. Defaults to false. +#' @param force_multifile boolean optional. Whether or not the csv should be split into multiple files. Default: false +#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this Csv Export job.} +#' \item{name}{string, The name of this Csv Export job.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} +#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} +#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. 
Defaults to false.} +#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} +#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' @export +exports_post_files_csv <- function(source, destination, name = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { + + args <- as.list(match.call())[-1] + path <- "/exports/files/csv" + path_params <- list() + query_params <- list() + body_params <- list(source = source, destination = destination, name = name, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a CSV Export +#' @param id integer required. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this Csv Export job.} +#' \item{name}{string, The name of this Csv Export job.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } +#' }} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} +#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} +#' \item{columnDelimiter}{string, The column delimiter for the output file. 
Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} +#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} +#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -enhancements_list_cass_ncoa_shares <- function(id) { +exports_get_files_csv <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/shares" + path <- "/exports/files/csv/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -5511,39 +6232,68 @@ enhancements_list_cass_ncoa_shares <- function(id) { } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Replace all attributes of this CSV Export +#' @param id integer required. The ID of this Csv Export job. +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } +#' } +#' @param name string optional. The name of this Csv Export job. +#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. +#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". +#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param hidden boolean optional. A boolean value indicating whether or not this request should be hidden. Defaults to false. +#' @param force_multifile boolean optional. 
Whether or not the csv should be split into multiple files. Default: false +#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID of this Csv Export job.} +#' \item{name}{string, The name of this Csv Export job.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} +#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} +#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} +#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} +#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' @export -enhancements_put_cass_ncoa_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +exports_put_files_csv <- function(id, source, destination, name = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/shares/users" + path <- "/exports/files/csv/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(source = source, destination = destination, name = name, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -5554,62 +6304,117 @@ enhancements_put_cass_ncoa_shares_users <- function(id, user_ids, permission_lev } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Update some attributes of this CSV Export +#' @param id integer required. The ID of this Csv Export job. +#' @param name string optional. The name of this Csv Export job. +#' @param source list optional. A list containing the following elements: +#' \itemize{ +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' } +#' @param destination list optional. A list containing the following elements: +#' \itemize{ +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } +#' } +#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. +#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". +#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param hidden boolean optional. 
A boolean value indicating whether or not this request should be hidden. Defaults to false. +#' @param force_multifile boolean optional. Whether or not the csv should be split into multiple files. Default: false +#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this Csv Export job.} +#' \item{name}{string, The name of this Csv Export job.} +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } +#' }} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} +#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} +#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} +#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} +#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' @export -enhancements_delete_cass_ncoa_shares_users <- function(id, user_id) { +exports_patch_files_csv <- function(id, name = NULL, source = NULL, destination = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/exports/files/csv/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, source = source, destination = destination, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID of this Csv Export job.} +#' \item{name}{string, The name of this Csv Export job.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item sql string, The SQL query for this Csv Export job +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item filenamePrefix string, The prefix of the name of the file returned to the user. +#' \item storagePath list . A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" +#' \item storageHostId integer, The ID of the destination storage host. +#' \item credentialId integer, The ID of the credentials for the destination storage host. +#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. 
If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. +#' } #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} +#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} +#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} +#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} +#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -enhancements_put_cass_ncoa_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +exports_put_files_csv_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/shares/groups" + path <- "/exports/files/csv/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -5620,64 +6425,69 @@ enhancements_put_cass_ncoa_shares_groups <- function(id, group_ids, permission_l } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. 
+#' List feature flags #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{name}{string, The name of the feature.} +#' \item{description}{string, } +#' \item{activeForMe}{boolean, Whether the feature is active for the current user.} +#' \item{userCount}{integer, The number of users with this feature flag enabled.} +#' \item{team}{string, } +#' \item{jira}{string, } +#' \item{added}{string, } +#' \item{groupCount}{integer, } +#' \item{organizationCount}{integer, } +#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} #' @export -enhancements_delete_cass_ncoa_shares_groups <- function(id, group_id) { +feature_flags_list <- function() { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) + path <- "/feature_flags/" + path_params <- list() query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List the projects a CASS/NCOA Enhancement belongs to -#' @param id integer required. The ID of the CASS/NCOA Enhancement. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Show a feature flag +#' @param name string required. The name of the feature. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{name}{string, The name of the feature.} +#' \item{description}{string, } +#' \item{organizations}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item id integer, Organization ID +#' \item name string, Organization name +#' \item slug string, Organization slug +#' }} +#' \item{groups}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Group ID +#' \item name string, Group name +#' \item slug string, Group slug +#' \item users array, Users within the group #' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} #' \item{users}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
+#' \item id integer, User ID +#' \item username string, Username #' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} #' @export -enhancements_list_cass_ncoa_projects <- function(id, hidden = NULL) { +feature_flags_get <- function(name) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/projects" - path_params <- list(id = id) - query_params <- list(hidden = hidden) + path <- "/feature_flags/{name}" + path_params <- list(name = name) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -5689,17 +6499,83 @@ enhancements_list_cass_ncoa_projects <- function(id, hidden = NULL) { } -#' Add a CASS/NCOA Enhancement to a project -#' @param id integer required. The ID of the CASS/NCOA Enhancement. -#' @param project_id integer required. The ID of the project. +#' Activate a feature for a user +#' @param flag_name string required. The feature name. +#' @param user_id integer required. The user ID. +#' +#' @return A list containing the following elements: +#' \item{name}{string, The name of the feature.} +#' \item{description}{string, } +#' \item{activeForMe}{boolean, Whether the feature is active for the current user.} +#' \item{userCount}{integer, The number of users with this feature flag enabled.} +#' \item{team}{string, } +#' \item{jira}{string, } +#' \item{added}{string, } +#' \item{groupCount}{integer, } +#' \item{organizationCount}{integer, } +#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} +#' @export +feature_flags_put_users <- function(flag_name, user_id) { + + args <- as.list(match.call())[-1] + path <- "/feature_flags/{flag_name}/users/{user_id}" + path_params <- list(flag_name = flag_name, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Deactivate a feature for a user +#' @param flag_name string required. The feature name. +#' @param user_id integer required. The user ID. #' #' @return An empty HTTP response #' @export -enhancements_put_cass_ncoa_projects <- function(id, project_id) { +feature_flags_delete_users <- function(flag_name, user_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/feature_flags/{flag_name}/users/{user_id}" + path_params <- list(flag_name = flag_name, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Activate a feature for a group +#' @param flag_name string required. The feature flag name. +#' @param group_id integer required. Group ID. 
+#' +#' @return A list containing the following elements: +#' \item{name}{string, The name of the feature.} +#' \item{description}{string, } +#' \item{activeForMe}{boolean, Whether the feature is active for the current user.} +#' \item{userCount}{integer, The number of users with this feature flag enabled.} +#' \item{team}{string, } +#' \item{jira}{string, } +#' \item{added}{string, } +#' \item{groupCount}{integer, } +#' \item{organizationCount}{integer, } +#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} +#' @export +feature_flags_put_groups <- function(flag_name, group_id) { + + args <- as.list(match.call())[-1] + path <- "/feature_flags/{flag_name}/groups/{group_id}" + path_params <- list(flag_name = flag_name, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -5712,17 +6588,17 @@ enhancements_put_cass_ncoa_projects <- function(id, project_id) { } -#' Remove a CASS/NCOA Enhancement from a project -#' @param id integer required. The ID of the CASS/NCOA Enhancement. -#' @param project_id integer required. The ID of the project. +#' Deactivate a feature for a group +#' @param flag_name string required. The feature flag name. +#' @param group_id integer required. Group ID. #' #' @return An empty HTTP response #' @export -enhancements_delete_cass_ncoa_projects <- function(id, project_id) { +feature_flags_delete_groups <- function(flag_name, group_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/feature_flags/{flag_name}/groups/{group_id}" + path_params <- list(flag_name = flag_name, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -5735,16 +6611,68 @@ enhancements_delete_cass_ncoa_projects <- function(id, project_id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Activate a feature for a organization +#' @param flag_name string required. The feature flag name. +#' @param organization_id integer required. Organization ID. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} +#' \item{name}{string, The name of the feature.} +#' \item{description}{string, } +#' \item{activeForMe}{boolean, Whether the feature is active for the current user.} +#' \item{userCount}{integer, The number of users with this feature flag enabled.} +#' \item{team}{string, } +#' \item{jira}{string, } +#' \item{added}{string, } +#' \item{groupCount}{integer, } +#' \item{organizationCount}{integer, } +#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} +#' @export +feature_flags_put_organizations <- function(flag_name, organization_id) { + + args <- as.list(match.call())[-1] + path <- "/feature_flags/{flag_name}/organizations/{organization_id}" + path_params <- list(flag_name = flag_name, organization_id = organization_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Deactivate a feature for a organization +#' @param flag_name string required. The feature flag name. +#' @param organization_id integer required. Organization ID. +#' +#' @return An empty HTTP response +#' @export +feature_flags_delete_organizations <- function(flag_name, organization_id) { + + args <- as.list(match.call())[-1] + path <- "/feature_flags/{flag_name}/organizations/{organization_id}" + path_params <- list(flag_name = flag_name, organization_id = organization_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List the projects a File belongs to +#' @param id integer required. The ID of the File. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -5753,30 +6681,9 @@ enhancements_delete_cass_ncoa_projects <- function(id, project_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
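Taken together, the feature-flag endpoints above support listing, inspecting, and toggling flags. A minimal sketch, assuming an authenticated civis session; the flag name, user ID, and group ID below are hypothetical:

    library(civis)
    # List all feature flags visible to the current user.
    flags <- feature_flags_list()
    # Inspect one flag, then enable it for a user and a group.
    feature_flags_get(name = "new_dashboard")
    feature_flags_put_users(flag_name = "new_dashboard", user_id = 42)
    feature_flags_put_groups(flag_name = "new_dashboard", group_id = 7)
    # Turn it back off for the user.
    feature_flags_delete_users(flag_name = "new_dashboard", user_id = 42)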
-#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -5784,49 +6691,41 @@ enhancements_delete_cass_ncoa_projects <- function(id, project_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name of the source table. -#' \item table string, The name of the source table. -#' \item remoteHostId integer, The ID of the database host for the table. -#' \item credentialId integer, The id of the credentials to be used when performing the enhancement. -#' \item multipartKey array, The source table primary key. -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The schema name for the output data. -#' \item table string, The table name for the output data. -#' } -#' }} -#' \item{columnMapping}{list, A list containing the following elements: -#' \itemize{ -#' \item address1 string, The first address line. -#' \item address2 string, The second address line. -#' \item city string, The city of an address. -#' \item state string, The state of an address. -#' \item zip string, The zip code of an address. -#' \item name string, The full name of the resident at this address. If needed, separate multiple columns with `+`, e.g. `first_name+last_name` -#' \item company string, The name of the company located at this address. -#' }} -#' \item{useDefaultColumnMapping}{boolean, Defaults to true, where the existing column mapping on the input table will be used. If false, a custom column mapping must be provided.} -#' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} -#' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. 
Required if 'performNcoa' is true.} -#' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -enhancements_put_cass_ncoa_archive <- function(id, status) { +files_list_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/cass-ncoa/{id}/archive" + path <- "/files/{id}/projects" path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add a File to a project +#' @param id integer required. The ID of the File. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +files_put_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/files/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() - body_params <- list(status = status) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -5837,6 +6736,29 @@ enhancements_put_cass_ncoa_archive <- function(id, status) { } +#' Remove a File from a project +#' @param id integer required. The ID of the File. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +files_delete_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/files/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -5859,10 +6781,10 @@ enhancements_put_cass_ncoa_archive <- function(id, status) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -enhancements_list_geocode_shares <- function(id) { +files_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/shares" + path <- "/files/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -5902,10 +6824,10 @@ enhancements_list_geocode_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -enhancements_put_geocode_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +files_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/shares/users" + path <- "/files/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -5925,10 +6847,10 @@ enhancements_put_geocode_shares_users <- function(id, user_ids, permission_level #' #' @return An empty HTTP response #' @export -enhancements_delete_geocode_shares_users <- function(id, user_id) { +files_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/shares/users/{user_id}" + path <- "/files/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -5968,10 +6890,10 @@ enhancements_delete_geocode_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -enhancements_put_geocode_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +files_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/shares/groups" + path <- "/files/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -5991,10 +6913,10 @@ enhancements_put_geocode_shares_groups <- function(id, group_ids, permission_lev #' #' @return An empty HTTP response #' @export -enhancements_delete_geocode_shares_groups <- function(id, group_id) { +files_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/shares/groups/{group_id}" + path <- "/files/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -6008,41 +6930,24 @@ enhancements_delete_geocode_shares_groups <- function(id, group_id) { } -#' List the projects a Geocode Enhancement belongs to -#' @param id integer required. The ID of the Geocode Enhancement. 
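The file-sharing endpoints mirror the other share APIs in the client. A hedged sketch with hypothetical file and user IDs:

    library(civis)
    # Grant two users read access to file 123 without sending a notification email.
    files_put_shares_users(id = 123, user_ids = list(456, 789),
                           permission_level = "read", send_shared_email = FALSE)
    # Later, revoke access for one of them.
    files_delete_shares_users(id = 123, user_id = 456)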
-#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -enhancements_list_geocode_projects <- function(id, hidden = NULL) { +files_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/projects" + path <- "/files/{id}/dependencies" path_params <- list(id = id) - query_params <- list(hidden = hidden) + query_params <- list(user_id = user_id) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -6054,19 +6959,31 @@ enhancements_list_geocode_projects <- function(id, hidden = NULL) { } -#' Add a Geocode Enhancement to a project -#' @param id integer required. The ID of the Geocode Enhancement. -#' @param project_id integer required. The ID of the project. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -enhancements_put_geocode_projects <- function(id, project_id) { +files_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/files/{id}/transfer" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -6077,134 +6994,100 @@ enhancements_put_geocode_projects <- function(id, project_id) { } -#' Remove a Geocode Enhancement from a project -#' @param id integer required. The ID of the Geocode Enhancement. -#' @param project_id integer required. The ID of the project. +#' Initiate an upload of a file into the platform +#' @param name string required. The file name. +#' @param expires_at string optional. The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the file.} +#' \item{name}{string, The file name.} +#' \item{createdAt}{string, The date and time the file was created.} +#' \item{fileSize}{integer, The file size.} +#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} +#' \item{uploadUrl}{string, The URL that may be used to upload a file. To use the upload URL, initiate a POST request to the given URL with the file you wish to import as the "file" form field.} +#' \item{uploadFields}{list, A hash containing the form fields to be included with the POST request.} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' @export -enhancements_delete_geocode_projects <- function(id, project_id) { +files_post <- function(name, expires_at = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/files/" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(name = name, expiresAt = expires_at) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Initiate a multipart upload +#' @param name string required. The file name. +#' @param num_parts integer required. The number of parts in which the file will be uploaded. This parameter determines the number of presigned URLs that are returned. +#' @param expires_at string optional. The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the enhancement.} -#' \item{name}{string, The name of the enhancement job.} -#' \item{type}{string, The type of the enhancement (e.g CASS-NCOA)} -#' \item{createdAt}{string, The time this enhancement was created.} -#' \item{updatedAt}{string, The time the enhancement was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the enhancement's last run} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, Parent ID that triggers this enhancement.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. 
-#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{remoteHostId}{integer, The ID of the remote host.} -#' \item{credentialId}{integer, The ID of the remote host credential.} -#' \item{sourceSchemaAndTable}{string, The source database schema and table.} -#' \item{multipartKey}{array, The source table primary key.} -#' \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} -#' \item{targetSchema}{string, The output table schema.} -#' \item{targetTable}{string, The output table name.} -#' \item{country}{string, The country of the addresses to be geocoded; either 'us' or 'ca'.} -#' \item{provider}{string, The geocoding provider; one of postgis, nominatim, and geocoder_ca.} -#' \item{outputAddress}{boolean, Whether to output the parsed address. Only guaranteed for the 'postgis' provider.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{id}{integer, The ID of the file.} +#' \item{name}{string, The file name.} +#' \item{createdAt}{string, The date and time the file was created.} +#' \item{fileSize}{integer, The file size.} +#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} +#' \item{uploadUrls}{array, An array of URLs that may be used to upload file parts. Use separate PUT requests to complete the part uploads. Links expire after 12 hours.} #' @export -enhancements_put_geocode_archive <- function(id, status) { +files_post_multipart <- function(name, num_parts, expires_at = NULL) { args <- as.list(match.call())[-1] - path <- "/enhancements/geocode/{id}/archive" + path <- "/files/multipart" + path_params <- list() + query_params <- list() + body_params <- list(name = name, numParts = num_parts, expiresAt = expires_at) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Complete a multipart upload +#' @param id integer required. The ID of the file. +#' +#' @return An empty HTTP response +#' @export +files_post_multipart_complete <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/files/multipart/{id}/complete" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List -#' @param type string optional. If specified, return exports of these types. It accepts a comma-separated list, possible values are 'database' and 'gdoc'. 
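For larger files, the multipart endpoints split the upload into separately uploaded parts. A rough sketch of that flow; the part file names and the use of httr::PUT are assumptions for illustration:

    library(civis)
    library(httr)
    # Reserve a two-part upload; the response carries one presigned URL per part.
    resp <- files_post_multipart(name = "large_file.csv", num_parts = 2)
    parts <- c("large_file_part1.bin", "large_file_part2.bin")
    # Upload each part with its own PUT request to the matching presigned URL.
    for (i in seq_along(resp$uploadUrls)) {
      httr::PUT(resp$uploadUrls[[i]],
                body = readBin(parts[[i]], what = "raw", n = file.size(parts[[i]])))
    }
    # Signal that all parts have been uploaded.
    files_post_multipart_complete(id = resp$id)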
-#' @param author string optional. If specified, return exports from this author. It accepts a comma-separated list of author ids. -#' @param status string optional. If specified, returns export with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Get details about a file +#' @param id integer required. The ID of the file. +#' @param link_expires_at string optional. The date and time the download link will expire. Must be a time between now and 36 hours from now. Defaults to 30 minutes from now. +#' @param inline boolean optional. If true, will return a url that can be displayed inline in HTML #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this export.} -#' \item{name}{string, The name of this export.} -#' \item{type}{string, The type of export.} -#' \item{createdAt}{string, The creation time for this export.} -#' \item{updatedAt}{string, The last modification time for this export.} -#' \item{state}{string, } -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the file.} +#' \item{name}{string, The file name.} +#' \item{createdAt}{string, The date and time the file was created.} +#' \item{fileSize}{integer, The file size.} +#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -6213,13 +7096,23 @@ enhancements_put_geocode_archive <- function(id, status) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} +#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} +#' \item{fileUrl}{string, The URL that may be used to download the file.} +#' \item{detectedInfo}{list, A list containing the following elements: +#' \itemize{ +#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. +#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". +#' \item compression string, The type of compression of the file. One of "gzip", or "none". +#' \item tableColumns array, An array of hashes corresponding to the columns in the file. 
Each hash should have keys for column "name" and "sql_type" +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -exports_list <- function(type = NULL, author = NULL, status = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +files_get <- function(id, link_expires_at = NULL, inline = NULL) { args <- as.list(match.call())[-1] - path <- "/exports/" - path_params <- list() - query_params <- list(type = type, author = author, status = status, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/files/{id}" + path_params <- list(id = id) + query_params <- list(link_expires_at = link_expires_at, inline = inline) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -6231,303 +7124,192 @@ exports_list <- function(type = NULL, author = NULL, status = NULL, hidden = NUL } -#' Create a CSV Export -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' } -#' @param destination list required. A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' } -#' @param name string optional. The name of this Csv Export job. -#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. -#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". -#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". -#' @param hidden boolean optional. A boolean value indicating whether or not this request should be hidden. Defaults to false. -#' @param force_multifile boolean optional. Whether or not the csv should be split into multiple files. Default: false -#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. +#' Update details about a file +#' @param id integer required. The ID of the file. +#' @param name string required. The file name. 
The extension must match the previous extension. +#' @param expires_at string required. The date and time the file will expire. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this Csv Export job.} -#' \item{name}{string, The name of this Csv Export job.} -#' \item{source}{list, A list containing the following elements: +#' \item{id}{integer, The ID of the file.} +#' \item{name}{string, The file name.} +#' \item{createdAt}{string, The date and time the file was created.} +#' \item{fileSize}{integer, The file size.} +#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: +#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} +#' \item{fileUrl}{string, The URL that may be used to download the file.} +#' \item{detectedInfo}{list, A list containing the following elements: #' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } +#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. +#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". +#' \item compression string, The type of compression of the file. One of "gzip", or "none". +#' \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" #' }} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} -#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} -#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. 
Defaults to false.} -#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} -#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -exports_post_files_csv <- function(source, destination, name = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { +files_put <- function(id, name, expires_at) { args <- as.list(match.call())[-1] - path <- "/exports/files/csv" - path_params <- list() + path <- "/files/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(source = source, destination = destination, name = name, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) + body_params <- list(name = name, expiresAt = expires_at) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get a CSV Export -#' @param id integer required. +#' Update details about a file +#' @param id integer required. The ID of the file. +#' @param name string optional. The file name. The extension must match the previous extension. +#' @param expires_at string optional. The date and time the file will expire. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this Csv Export job.} -#' \item{name}{string, The name of this Csv Export job.} -#' \item{source}{list, A list containing the following elements: +#' \item{id}{integer, The ID of the file.} +#' \item{name}{string, The file name.} +#' \item{createdAt}{string, The date and time the file was created.} +#' \item{fileSize}{integer, The file size.} +#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: +#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} +#' \item{fileUrl}{string, The URL that may be used to download the file.} +#' \item{detectedInfo}{list, A list containing the following elements: #' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. 
the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } +#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. +#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". +#' \item compression string, The type of compression of the file. One of "gzip", or "none". +#' \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" #' }} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} -#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} -#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} -#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} -#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -exports_get_files_csv <- function(id) { +files_patch <- function(id, name = NULL, expires_at = NULL) { args <- as.list(match.call())[-1] - path <- "/exports/files/csv/{id}" + path <- "/files/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, expiresAt = expires_at) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Replace all attributes of this CSV Export -#' @param id integer required. The ID of this Csv Export job. -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' } -#' @param destination list required. A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. 
the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' } -#' @param name string optional. The name of this Csv Export job. -#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. -#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". -#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". -#' @param hidden boolean optional. A boolean value indicating whether or not this request should be hidden. Defaults to false. -#' @param force_multifile boolean optional. Whether or not the csv should be split into multiple files. Default: false -#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. +#' Create a Preprocess CSV +#' @param file_id integer required. The ID of the file. +#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. +#' @param detect_table_columns boolean optional. If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. +#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). +#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. +#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. +#' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this Csv Export job.} -#' \item{name}{string, The name of this Csv Export job.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . 
A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' }} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} -#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} -#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} -#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} -#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{id}{integer, The ID of the job created.} +#' \item{fileId}{integer, The ID of the file.} +#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} +#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} +#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} +#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". 
If not provided, the column delimiter will be auto-detected.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -exports_put_files_csv <- function(id, source, destination, name = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { +files_post_preprocess_csv <- function(file_id, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/exports/files/csv/{id}" - path_params <- list(id = id) + path <- "/files/preprocess/csv" + path_params <- list() query_params <- list() - body_params <- list(source = source, destination = destination, name = name, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) + body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Update some attributes of this CSV Export -#' @param id integer required. The ID of this Csv Export job. -#' @param name string optional. The name of this Csv Export job. -#' @param source list optional. A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' } -#' @param destination list optional. A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' } -#' @param include_header boolean optional. A boolean value indicating whether or not the header should be included. Defaults to true. -#' @param compression string optional. The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip". -#' @param column_delimiter string optional. The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". 
Defaults to "comma". -#' @param hidden boolean optional. A boolean value indicating whether or not this request should be hidden. Defaults to false. -#' @param force_multifile boolean optional. Whether or not the csv should be split into multiple files. Default: false -#' @param max_file_size integer optional. The max file size, in MB, created files will be. Only available when force_multifile is true. +#' Get a Preprocess CSV +#' @param id integer required. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this Csv Export job.} -#' \item{name}{string, The name of this Csv Export job.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' }} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} -#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} -#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} -#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} -#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{id}{integer, The ID of the job created.} +#' \item{fileId}{integer, The ID of the file.} +#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} +#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} +#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. 
If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} +#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -exports_patch_files_csv <- function(id, name = NULL, source = NULL, destination = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, hidden = NULL, force_multifile = NULL, max_file_size = NULL) { +files_get_preprocess_csv <- function(id) { args <- as.list(match.call())[-1] - path <- "/exports/files/csv/{id}" + path <- "/files/preprocess/csv/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, destination = destination, includeHeader = include_header, compression = compression, columnDelimiter = column_delimiter, hidden = hidden, forceMultifile = force_multifile, maxFileSize = max_file_size) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Replace all attributes of this Preprocess CSV +#' @param id integer required. The ID of the job created. +#' @param file_id integer required. The ID of the file. +#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. +#' @param detect_table_columns boolean optional. If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. +#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). +#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. +#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this Csv Export job.} -#' \item{name}{string, The name of this Csv Export job.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item sql string, The SQL query for this Csv Export job -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. 
-#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item filenamePrefix string, The prefix of the name of the file returned to the user. -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item filePath string, The path within the bucket where the exported file will be saved. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/" -#' \item storageHostId integer, The ID of the destination storage host. -#' \item credentialId integer, The ID of the credentials for the destination storage host. -#' \item existingFiles string, Notifies the job of what to do in the case that the exported file already exists at the provided path.One of: fail, append, overwrite. Default: fail. If "append" is specified,the new file will always be added to the provided path. If "overwrite" is specifiedall existing files at the provided path will be deleted and the new file will be added.By default, or if "fail" is specified, the export will fail if a file exists at the provided path. -#' } -#' }} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the header should be included. Defaults to true.} -#' \item{compression}{string, The compression of the output file. Valid arguments are "gzip" and "none". Defaults to "gzip".} -#' \item{columnDelimiter}{string, The column delimiter for the output file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} -#' \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} -#' \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +#' \item{id}{integer, The ID of the job created.} +#' \item{fileId}{integer, The ID of the file.} +#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} +#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} +#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} +#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". 
If not provided, the column delimiter will be auto-detected.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -exports_put_files_csv_archive <- function(id, status) { +files_put_preprocess_csv <- function(id, file_id, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL) { args <- as.list(match.call())[-1] - path <- "/exports/files/csv/{id}/archive" + path <- "/files/preprocess/csv/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -6538,102 +7320,85 @@ exports_put_files_csv_archive <- function(id, status) { } -#' List feature flags +#' Update some attributes of this Preprocess CSV +#' @param id integer required. The ID of the job created. +#' @param file_id integer optional. The ID of the file. +#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. +#' @param detect_table_columns boolean optional. If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. +#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). +#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. +#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. #' -#' @return An array containing the following fields: -#' \item{name}{string, The name of the feature flag.} -#' \item{userCount}{integer, The number of users with this feature flag enabled.} -#' \item{description}{string, } -#' \item{team}{string, } -#' \item{jira}{string, } -#' \item{added}{string, } -#' \item{groupCount}{integer, } -#' \item{organizationCount}{integer, } -#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -#' \item{activeForMe}{boolean, Whether the feature flag is active for the current user.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the job created.} +#' \item{fileId}{integer, The ID of the file.} +#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} +#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} +#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. 
If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} +#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -feature_flags_list <- function() { +files_patch_preprocess_csv <- function(id, file_id = NULL, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL) { args <- as.list(match.call())[-1] - path <- "/feature_flags/" - path_params <- list() + path <- "/files/preprocess/csv/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Show a feature flag -#' @param name string required. The name of the feature flag. +#' Archive a Preprocess CSV (deprecated, use archiving endpoints instead) +#' @param id integer required. #' -#' @return A list containing the following elements: -#' \item{name}{string, The name of the feature flag} -#' \item{organizations}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, Organization ID -#' \item name string, Organization name -#' \item slug string, Organization slug -#' }} -#' \item{groups}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, Group ID -#' \item name string, Group name -#' \item slug string, Group slug -#' \item users array, Users within the group -#' }} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, User ID -#' \item username string, Username -#' }} -#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} +#' @return An empty HTTP response #' @export -feature_flags_get <- function(name) { +files_delete_preprocess_csv <- function(id) { args <- as.list(match.call())[-1] - path <- "/feature_flags/{name}" - path_params <- list(name = name) + path <- "/files/preprocess/csv/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Activate a feature for a user -#' @param flag_name string required. The feature flag name. -#' @param user_id integer required. The user ID. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. 
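# A minimal usage sketch of the new preprocess-CSV endpoints, assuming a
# configured CIVIS_API_KEY; the file ID below is a hypothetical placeholder.
library(civis)
job <- files_post_preprocess_csv(file_id = 1234, detect_table_columns = TRUE,
                                 include_header = TRUE)
files_get_preprocess_csv(job$id)  # poll the preprocess job that was created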
+#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{name}{string, The name of the feature flag.} -#' \item{userCount}{integer, The number of users with this feature flag enabled.} -#' \item{description}{string, } -#' \item{team}{string, } -#' \item{jira}{string, } -#' \item{added}{string, } -#' \item{groupCount}{integer, } -#' \item{organizationCount}{integer, } -#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -#' \item{activeForMe}{boolean, Whether the feature flag is active for the current user.} +#' \item{id}{integer, The ID of the job created.} +#' \item{fileId}{integer, The ID of the file.} +#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} +#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} +#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} +#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} +#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -feature_flags_put_users <- function(flag_name, user_id) { +files_put_preprocess_csv_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/feature_flags/{flag_name}/users/{user_id}" - path_params <- list(flag_name = flag_name, user_id = user_id) + path <- "/files/preprocess/csv/{id}/archive" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -6644,73 +7409,97 @@ feature_flags_put_users <- function(flag_name, user_id) { } -#' Deactivate a feature for a user -#' @param flag_name string required. The feature flag name. -#' @param user_id integer required. The user ID. +#' List bookmarked git repositories +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to repo_url. Must be one of: repo_url, created_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. 
#' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this git repository.} +#' \item{repoUrl}{string, The URL for this git repository.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export -feature_flags_delete_users <- function(flag_name, user_id) { +git_repos_list <- function(limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/feature_flags/{flag_name}/users/{user_id}" - path_params <- list(flag_name = flag_name, user_id = user_id) - query_params <- list() + path <- "/git_repos/" + path_params <- list() + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Activate a feature for a group -#' @param flag_name string required. The feature flag name. -#' @param group_id integer required. Group ID. +#' Bookmark a git repository +#' @param repo_url string required. The URL for this git repository. #' #' @return A list containing the following elements: -#' \item{name}{string, The name of the feature flag.} -#' \item{userCount}{integer, The number of users with this feature flag enabled.} -#' \item{description}{string, } -#' \item{team}{string, } -#' \item{jira}{string, } -#' \item{added}{string, } -#' \item{groupCount}{integer, } -#' \item{organizationCount}{integer, } -#' \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -#' \item{activeForMe}{boolean, Whether the feature flag is active for the current user.} +#' \item{id}{integer, The ID for this git repository.} +#' \item{repoUrl}{string, The URL for this git repository.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export -feature_flags_put_groups <- function(flag_name, group_id) { +git_repos_post <- function(repo_url) { args <- as.list(match.call())[-1] - path <- "/feature_flags/{flag_name}/groups/{group_id}" - path_params <- list(flag_name = flag_name, group_id = group_id) + path <- "/git_repos/" + path_params <- list() + query_params <- list() + body_params <- list(repoUrl = repo_url) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a bookmarked git repository +#' @param id integer required. The ID for this git repository. 
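# A minimal usage sketch of the git repository bookmark endpoints, assuming a
# configured CIVIS_API_KEY; the repository URL is a hypothetical placeholder.
library(civis)
repo <- git_repos_post(repo_url = "https://github.com/example-org/example-repo.git")
git_repos_list_refs(repo$id)  # branches and tags of the bookmarked repository
git_repos_delete(repo$id)     # remove the bookmark again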
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this git repository.} +#' \item{repoUrl}{string, The URL for this git repository.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +git_repos_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/git_repos/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Deactivate a feature for a group -#' @param flag_name string required. The feature flag name. -#' @param group_id integer required. Group ID. +#' Remove the bookmark on a git repository +#' @param id integer required. The ID for this git repository. #' #' @return An empty HTTP response #' @export -feature_flags_delete_groups <- function(flag_name, group_id) { +git_repos_delete <- function(id) { args <- as.list(match.call())[-1] - path <- "/feature_flags/{flag_name}/groups/{group_id}" - path_params <- list(flag_name = flag_name, group_id = group_id) + path <- "/git_repos/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -6723,13 +7512,55 @@ feature_flags_delete_groups <- function(flag_name, group_id) { } -#' List the projects a File belongs to -#' @param id integer required. The ID of the File. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Get all branches and tags of a bookmarked git repository +#' @param id integer required. The ID for this git repository. +#' +#' @return A list containing the following elements: +#' \item{branches}{array, List of branch names of this git repository.} +#' \item{tags}{array, List of tag names of this git repository.} +#' @export +git_repos_list_refs <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/git_repos/{id}/refs" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List Groups +#' @param query string optional. If specified, it will filter the groups returned. +#' @param permission string optional. A permissions string, one of "read", "write", or "manage". Lists only groups for which the current user has that permission. +#' @param include_members boolean optional. Show members of the group. +#' @param organization_id integer optional. The organization by which to filter groups. +#' @param user_ids array optional. A list of user IDs to filter groups by.Groups will be returned if any of the users is a member +#' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name, created_at. +#' @param order_dir string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -6737,27 +7568,124 @@ feature_flags_delete_groups <- function(flag_name, group_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: +#' @export +groups_list <- function(query = NULL, permission = NULL, include_members = NULL, organization_id = NULL, user_ids = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/groups/" + path_params <- list() + query_params <- list(query = query, permission = permission, include_members = include_members, organization_id = organization_id, user_ids = user_ids, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Create a Group +#' @param name string required. This group's name. +#' @param description string optional. The description of the group. +#' @param slug string optional. The slug for this group. +#' @param organization_id integer optional. The ID of the organization this group belongs to. +#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. +#' @param role_ids array optional. An array of ids of all the roles this group has. +#' @param default_time_zone string optional. The default time zone of this group. +#' @param default_jobs_label string optional. The default partition label for jobs of this group. +#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. +#' @param default_services_label string optional. The default partition label for services of this group. 
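# A minimal usage sketch of the group endpoints, assuming a configured
# CIVIS_API_KEY and manage permissions; the names used are placeholders.
library(civis)
grp <- groups_post(name = "Example Group", description = "Created for illustration")
groups_list(query = "Example", include_members = TRUE)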
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} +#' \item{roleIds}{array, An array of ids of all the roles this group has.} +#' \item{defaultTimeZone}{string, The default time zone of this group.} +#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +#' \item{defaultServicesLabel}{string, The default partition label for services of this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. #' \item username string, This user's username. #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. +#' \item email string, This user's email address. +#' \item primaryGroupId integer, The ID of the primary group of this user. +#' \item active boolean, Whether this user account is active or deactivated. #' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -files_list_projects <- function(id, hidden = NULL) { +groups_post <- function(name, description = NULL, slug = NULL, organization_id = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { args <- as.list(match.call())[-1] - path <- "/files/{id}/projects" + path <- "/groups/" + path_params <- list() + query_params <- list() + body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a Group +#' @param id integer required. 
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} +#' \item{roleIds}{array, An array of ids of all the roles this group has.} +#' \item{defaultTimeZone}{string, The default time zone of this group.} +#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +#' \item{defaultServicesLabel}{string, The default partition label for services of this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' \item email string, This user's email address. +#' \item primaryGroupId integer, The ID of the primary group of this user. +#' \item active boolean, Whether this user account is active or deactivated. +#' }} +#' @export +groups_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/groups/{id}" path_params <- list(id = id) - query_params <- list(hidden = hidden) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -6769,19 +7697,57 @@ files_list_projects <- function(id, hidden = NULL) { } -#' Add a File to a project -#' @param id integer required. The ID of the File. -#' @param project_id integer required. The ID of the project. +#' Replace all attributes of this Group +#' @param id integer required. The ID of this group. +#' @param name string required. This group's name. +#' @param description string optional. The description of the group. +#' @param slug string optional. The slug for this group. +#' @param organization_id integer optional. The ID of the organization this group belongs to. +#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. +#' @param role_ids array optional. An array of ids of all the roles this group has. +#' @param default_time_zone string optional. The default time zone of this group. +#' @param default_jobs_label string optional. The default partition label for jobs of this group. +#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. +#' @param default_services_label string optional. The default partition label for services of this group. 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} +#' \item{roleIds}{array, An array of ids of all the roles this group has.} +#' \item{defaultTimeZone}{string, The default time zone of this group.} +#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +#' \item{defaultServicesLabel}{string, The default partition label for services of this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' \item email string, This user's email address. +#' \item primaryGroupId integer, The ID of the primary group of this user. +#' \item active boolean, Whether this user account is active or deactivated. +#' }} #' @export -files_put_projects <- function(id, project_id) { +groups_put <- function(id, name, description = NULL, slug = NULL, organization_id = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { args <- as.list(match.call())[-1] - path <- "/files/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/groups/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -6792,17 +7758,77 @@ files_put_projects <- function(id, project_id) { } -#' Remove a File from a project -#' @param id integer required. The ID of the File. -#' @param project_id integer required. The ID of the project. +#' Update some attributes of this Group +#' @param id integer required. The ID of this group. +#' @param name string optional. This group's name. +#' @param description string optional. 
The description of the group. +#' @param slug string optional. The slug for this group. +#' @param organization_id integer optional. The ID of the organization this group belongs to. +#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. +#' @param role_ids array optional. An array of ids of all the roles this group has. +#' @param default_time_zone string optional. The default time zone of this group. +#' @param default_jobs_label string optional. The default partition label for jobs of this group. +#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. +#' @param default_services_label string optional. The default partition label for services of this group. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} +#' \item{roleIds}{array, An array of ids of all the roles this group has.} +#' \item{defaultTimeZone}{string, The default time zone of this group.} +#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +#' \item{defaultServicesLabel}{string, The default partition label for services of this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' \item email string, This user's email address. +#' \item primaryGroupId integer, The ID of the primary group of this user. +#' \item active boolean, Whether this user account is active or deactivated. 
+#' }} +#' @export +groups_patch <- function(id, name = NULL, description = NULL, slug = NULL, organization_id = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { + + args <- as.list(match.call())[-1] + path <- "/groups/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Delete a Group (deprecated) +#' @param id integer required. #' #' @return An empty HTTP response #' @export -files_delete_projects <- function(id, project_id) { +groups_delete <- function(id) { args <- as.list(match.call())[-1] - path <- "/files/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/groups/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -6837,10 +7863,10 @@ files_delete_projects <- function(id, project_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -files_list_shares <- function(id) { +groups_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/files/{id}/shares" + path <- "/groups/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -6880,10 +7906,10 @@ files_list_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -files_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +groups_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/files/{id}/shares/users" + path <- "/groups/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -6903,10 +7929,10 @@ files_put_shares_users <- function(id, user_ids, permission_level, share_email_b #' #' @return An empty HTTP response #' @export -files_delete_shares_users <- function(id, user_id) { +groups_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/files/{id}/shares/users/{user_id}" + path <- "/groups/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -6946,10 +7972,10 @@ files_delete_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -files_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +groups_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/files/{id}/shares/groups" + path <- "/groups/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -6969,10 +7995,10 @@ files_put_shares_groups <- function(id, group_ids, permission_level, share_email #' #' @return An empty HTTP response #' @export -files_delete_shares_groups <- function(id, group_id) { +groups_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/files/{id}/shares/groups/{group_id}" + path <- "/groups/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -6986,123 +8012,146 @@ files_delete_shares_groups <- function(id, group_id) { } -#' Initiate an upload of a file into the platform -#' @param name string required. The file name. -#' @param expires_at string optional. The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null. +#' Add a user to a group +#' @param id integer required. The ID of the group. +#' @param user_id integer required. The ID of the user. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the file.} -#' \item{name}{string, The file name.} -#' \item{createdAt}{string, The date and time the file was created.} -#' \item{fileSize}{integer, The file size.} -#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} -#' \item{uploadUrl}{string, The URL that may be used to upload a file. 
To use the upload URL, initiate a POST request to the given URL with the file you wish to import as the "file" form field.} -#' \item{uploadFields}{list, A hash containing the form fields to be included with the POST request.} +#' \item{id}{integer, The ID of this group.} +#' \item{name}{string, This group's name.} +#' \item{createdAt}{string, The date and time when this group was created.} +#' \item{updatedAt}{string, The date and time when this group was last updated.} +#' \item{description}{string, The description of the group.} +#' \item{slug}{string, The slug for this group.} +#' \item{organizationId}{integer, The ID of the organization this group belongs to.} +#' \item{organizationName}{string, The name of the organization this group belongs to.} +#' \item{memberCount}{integer, The number of active members in this group.} +#' \item{totalMemberCount}{integer, The total number of members in this group.} +#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} +#' \item{roleIds}{array, An array of ids of all the roles this group has.} +#' \item{defaultTimeZone}{string, The default time zone of this group.} +#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +#' \item{defaultServicesLabel}{string, The default partition label for services of this group.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +#' \item{createdById}{integer, The ID of the user who created this group.} +#' \item{members}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' \item email string, This user's email address. +#' \item primaryGroupId integer, The ID of the primary group of this user. +#' \item active boolean, Whether this user account is active or deactivated. +#' }} #' @export -files_post <- function(name, expires_at = NULL) { +groups_put_members <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/files/" - path_params <- list() + path <- "/groups/{id}/members/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() - body_params <- list(name = name, expiresAt = expires_at) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Initiate a multipart upload -#' @param name string required. The file name. -#' @param num_parts integer required. The number of parts in which the file will be uploaded. This parameter determines the number of presigned URLs that are returned. -#' @param expires_at string optional. The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null. +#' Remove a user from a group +#' @param id integer required. The ID of the group. +#' @param user_id integer required. The ID of the user. 
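# A minimal usage sketch of group membership management, assuming a configured
# CIVIS_API_KEY; the group and user IDs are hypothetical placeholders.
library(civis)
groups_put_members(id = 42, user_id = 7)     # add user 7 to group 42
groups_delete_members(id = 42, user_id = 7)  # remove the user again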
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the file.} -#' \item{name}{string, The file name.} -#' \item{createdAt}{string, The date and time the file was created.} -#' \item{fileSize}{integer, The file size.} -#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} -#' \item{uploadUrls}{array, An array of URLs that may be used to upload file parts. Use separate PUT requests to complete the part uploads. Links expire after 12 hours.} +#' @return An empty HTTP response #' @export -files_post_multipart <- function(name, num_parts, expires_at = NULL) { +groups_delete_members <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/files/multipart" - path_params <- list() + path <- "/groups/{id}/members/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() - body_params <- list(name = name, numParts = num_parts, expiresAt = expires_at) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Complete a multipart upload -#' @param id integer required. The ID of the file. +#' Get child groups of this group +#' @param id integer required. The ID of this group. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{manageable}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, +#' \item name string, +#' }} +#' \item{writeable}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, +#' \item name string, +#' }} +#' \item{readable}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, +#' \item name string, +#' }} #' @export -files_post_multipart_complete <- function(id) { +groups_list_child_groups <- function(id) { args <- as.list(match.call())[-1] - path <- "/files/multipart/{id}/complete" + path <- "/groups/{id}/child_groups" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Get details about a file -#' @param id integer required. The ID of the file. -#' @param link_expires_at string optional. The date and time the download link will expire. Must be a time between now and 36 hours from now. Defaults to 30 minutes from now. -#' @param inline boolean optional. If true, will return a url that can be displayed inline in HTML +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the file.} -#' \item{name}{string, The file name.} -#' \item{createdAt}{string, The date and time the file was created.} -#' \item{fileSize}{integer, The file size.} -#' \item{expiresAt}{string, The date and time the file will expire. 
If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} -#' \item{author}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item users array, +#' \item groups array, #' }} -#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} -#' \item{fileUrl}{string, The URL that may be used to download the file.} -#' \item{detectedInfo}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. -#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". -#' \item compression string, The type of compression of the file. One of "gzip", or "none". -#' \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -files_get <- function(id, link_expires_at = NULL, inline = NULL) { +imports_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/files/{id}" + path <- "/imports/{id}/shares" path_params <- list(id = id) - query_params <- list(link_expires_at = link_expires_at, inline = inline) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -7114,42 +8163,39 @@ files_get <- function(id, link_expires_at = NULL, inline = NULL) { } -#' Update details about a file -#' @param id integer required. The ID of the file. -#' @param name string required. The file name. The extension must match the previous extension. -#' @param expires_at string required. The date and time the file will expire. +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the file.} -#' \item{name}{string, The file name.} -#' \item{createdAt}{string, The date and time the file was created.} -#' \item{fileSize}{integer, The file size.} -#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. 
To keep a file indefinitely, specify null.} -#' \item{author}{list, A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item users array, +#' \item groups array, #' }} -#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} -#' \item{fileUrl}{string, The URL that may be used to download the file.} -#' \item{detectedInfo}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. -#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". -#' \item compression string, The type of compression of the file. One of "gzip", or "none". -#' \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -files_put <- function(id, name, expires_at) { +imports_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/files/{id}" + path <- "/imports/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, expiresAt = expires_at) + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -7160,111 +8206,117 @@ files_put <- function(id, name, expires_at) { } -#' Update details about a file -#' @param id integer required. The ID of the file. -#' @param name string optional. The file name. The extension must match the previous extension. -#' @param expires_at string optional. The date and time the file will expire. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the file.} -#' \item{name}{string, The file name.} -#' \item{createdAt}{string, The date and time the file was created.} -#' \item{fileSize}{integer, The file size.} -#' \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. 
-#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{downloadUrl}{string, A JSON string containing information about the URL of the file.} -#' \item{fileUrl}{string, The URL that may be used to download the file.} -#' \item{detectedInfo}{list, A list containing the following elements: -#' \itemize{ -#' \item includeHeader boolean, A boolean value indicating whether or not the first row of the file is a header row. -#' \item columnDelimiter string, The column delimiter for the file. One of "comma", "tab", or "pipe". -#' \item compression string, The type of compression of the file. One of "gzip", or "none". -#' \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" -#' }} +#' @return An empty HTTP response #' @export -files_patch <- function(id, name = NULL, expires_at = NULL) { +imports_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/files/{id}" - path_params <- list(id = id) + path <- "/imports/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() - body_params <- list(name = name, expiresAt = expires_at) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Create a Preprocess CSV -#' @param file_id integer required. The ID of the file. -#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. -#' @param detect_table_columns boolean optional. If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. -#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). -#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. -#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. -#' @param hidden boolean optional. The hidden status of the item. +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. 
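+#'
+#' @examples
+#' \dontrun{
+#' # Hypothetical usage sketch; the import and group IDs are placeholders.
+#' # Grants read access on import 123 to groups 456 and 789 without sending
+#' # notification emails.
+#' imports_put_shares_groups(id = 123, group_ids = c(456, 789),
+#'                           permission_level = "read",
+#'                           send_shared_email = FALSE)
+#' }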
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the job created.} -#' \item{fileId}{integer, The ID of the file.} -#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} -#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} -#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} -#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -files_post_preprocess_csv <- function(file_id, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL, hidden = NULL) { +imports_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv" - path_params <- list() + path <- "/imports/{id}/shares/groups" + path_params <- list(id = id) query_params <- list() - body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter, hidden = hidden) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get a Preprocess CSV -#' @param id integer required. +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. 
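+#'
+#' @examples
+#' \dontrun{
+#' # Hypothetical usage sketch; IDs are placeholders. Revokes group 456's
+#' # permissions on import 123.
+#' imports_delete_shares_groups(id = 123, group_id = 456)
+#' }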
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the job created.} -#' \item{fileId}{integer, The ID of the file.} -#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} -#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} -#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} -#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' @return An empty HTTP response #' @export -files_get_preprocess_csv <- function(id) { +imports_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv/{id}" - path_params <- list(id = id) + path <- "/imports/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +imports_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/imports/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) @@ -7272,32 +8324,31 @@ files_get_preprocess_csv <- function(id) { } -#' Replace all attributes of this Preprocess CSV -#' @param id integer required. The ID of the job created. -#' @param file_id integer required. The ID of the file. -#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. -#' @param detect_table_columns boolean optional. 
If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. -#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). -#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. -#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the job created.} -#' \item{fileId}{integer, The ID of the file.} -#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} -#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} -#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} -#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". 
If not provided, the column delimiter will be auto-detected.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -files_put_preprocess_csv <- function(id, file_id, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL) { +imports_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv/{id}" + path <- "/imports/{id}/transfer" path_params <- list(id = id) query_params <- list() - body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter) + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -7308,52 +8359,86 @@ files_put_preprocess_csv <- function(id, file_id, in_place = NULL, detect_table_ } -#' Update some attributes of this Preprocess CSV -#' @param id integer required. The ID of the job created. -#' @param file_id integer optional. The ID of the file. -#' @param in_place boolean optional. If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true. -#' @param detect_table_columns boolean optional. If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false. -#' @param force_character_set_conversion boolean optional. If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII). -#' @param include_header boolean optional. A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present. -#' @param column_delimiter string optional. The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected. +#' List the projects an Import belongs to +#' @param id integer required. The ID of the Import. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the job created.} -#' \item{fileId}{integer, The ID of the file.} -#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. 
Defaults to true.} -#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} -#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} -#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -files_patch_preprocess_csv <- function(id, file_id = NULL, in_place = NULL, detect_table_columns = NULL, force_character_set_conversion = NULL, include_header = NULL, column_delimiter = NULL) { +imports_list_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv/{id}" + path <- "/imports/{id}/projects" path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add an Import to a project +#' @param id integer required. The ID of the Import. +#' @param project_id integer required. The ID of the project. 
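+#'
+#' @examples
+#' \dontrun{
+#' # Hypothetical usage sketch; the import and project IDs are placeholders.
+#' imports_put_projects(id = 123, project_id = 99)
+#' imports_list_projects(id = 123)  # confirm the project assignment
+#' }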
+#' +#' @return An empty HTTP response +#' @export +imports_put_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/imports/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() - body_params <- list(fileId = file_id, inPlace = in_place, detectTableColumns = detect_table_columns, forceCharacterSetConversion = force_character_set_conversion, includeHeader = include_header, columnDelimiter = column_delimiter) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Archive a Preprocess CSV (deprecated, use archiving endpoints instead) -#' @param id integer required. +#' Remove an Import from a project +#' @param id integer required. The ID of the Import. +#' @param project_id integer required. The ID of the project. #' #' @return An empty HTTP response #' @export -files_delete_preprocess_csv <- function(id) { +imports_delete_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv/{id}" - path_params <- list(id = id) + path <- "/imports/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -7371,19 +8456,93 @@ files_delete_preprocess_csv <- function(id) { #' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the job created.} -#' \item{fileId}{integer, The ID of the file.} -#' \item{inPlace}{boolean, If true, the file is cleaned in place. If false, a new file ID is created. Defaults to true.} -#' \item{detectTableColumns}{boolean, If true, detect the table columns in the file including the sql types. If false, skip table column detection.Defaults to false.} -#' \item{forceCharacterSetConversion}{boolean, If true, the file will always be converted to UTF-8 and any character that cannot be converted will be discarded. If false, the character set conversion will only run if the detected character set is not compatible with UTF-8 (e.g., UTF-8, ASCII).} -#' \item{includeHeader}{boolean, A boolean value indicating whether or not the first row of the file is a header row. If not provided, will attempt to auto-detect whether a header row is present.} -#' \item{columnDelimiter}{string, The column delimiter for the file. One of "comma", "tab", or "pipe". If not provided, the column delimiter will be auto-detected.} +#' \item{name}{string, The name of the import.} +#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. 
+#' \item name string, +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' \item name string, +#' }} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{parentId}{integer, Parent id to trigger this import from} +#' \item{id}{integer, The ID for the import.} +#' \item{isOutbound}{boolean, } +#' \item{jobType}{string, The job type of this import.} +#' \item{syncs}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, +#' \item source object, +#' \item destination object, +#' \item advancedOptions object, +#' }} +#' \item{state}{string, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. 
+#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{nextRunAt}{string, The time of the next scheduled run.} +#' \item{timeZone}{string, The time zone of this import.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -files_put_preprocess_csv_archive <- function(id, status) { +imports_put_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/files/preprocess/csv/{id}/archive" + path <- "/imports/{id}/archive" path_params <- list(id = id) query_params <- list() body_params <- list(status = status) @@ -7397,24 +8556,77 @@ files_put_preprocess_csv_archive <- function(id, status) { } -#' List bookmarked git repositories -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' List Imports +#' @param type string optional. If specified, return imports of these types. It accepts a comma-separated list, possible values are 'AutoImport', 'DbSync', 'Salesforce', 'GdocImport'. +#' @param destination string optional. If specified, returns imports with one of these destinations. It accepts a comma-separated list of remote host ids. +#' @param source string optional. If specified, returns imports with one of these sources. It accepts a comma-separated list of remote host ids. 'DbSync' must be specified for 'type'. +#' @param status string optional. If specified, returns imports with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to repo_url. Must be one of: repo_url, created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this git repository.} -#' \item{repoUrl}{string, The URL for this git repository.} +#' \item{name}{string, The name of the import.} +#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. 
For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' \item name string, +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' \item name string, +#' }} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{id}{integer, The ID for the import.} +#' \item{isOutbound}{boolean, } +#' \item{jobType}{string, The job type of this import.} +#' \item{state}{string, } #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{timeZone}{string, The time zone of this import.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -git_repos_list <- function(limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +imports_list <- function(type = NULL, destination = NULL, source = NULL, status = NULL, author = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/git_repos/" + path <- "/imports/" path_params <- list() - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(type = type, destination = destination, source = source, status = status, author = author, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -7426,22 +8638,140 @@ git_repos_list <- function(limit = NULL, page_num = NULL, order = NULL, order_di } -#' Bookmark a git repository -#' @param repo_url string required. The URL for this git repository. 
+#' Create a new import configuration +#' @param name string required. The name of the import. +#' @param sync_type string required. The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce. +#' @param is_outbound boolean required. +#' @param source list optional. A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' } +#' @param destination list optional. A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' } +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param parent_id integer optional. Parent id to trigger this import from +#' @param next_run_at string optional. The time of the next scheduled run. +#' @param time_zone string optional. The time zone of this import. +#' @param hidden boolean optional. The hidden status of the item. 
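+#'
+#' @examples
+#' \dontrun{
+#' # Hypothetical usage sketch; the name is a placeholder and the optional
+#' # source, destination, and schedule lists are omitted for brevity.
+#' # "AutoImport" is one of the sync types documented above.
+#' imports_post(name = "example nightly import",
+#'              sync_type = "AutoImport",
+#'              is_outbound = FALSE)
+#' }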
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this git repository.} -#' \item{repoUrl}{string, The URL for this git repository.} +#' \item{name}{string, The name of the import.} +#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' \item name string, +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item remoteHostId integer, +#' \item credentialId integer, +#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. +#' \item name string, +#' }} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. 
+#' }} +#' \item{parentId}{integer, Parent id to trigger this import from} +#' \item{id}{integer, The ID for the import.} +#' \item{isOutbound}{boolean, } +#' \item{jobType}{string, The job type of this import.} +#' \item{syncs}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, +#' \item source object, +#' \item destination object, +#' \item advancedOptions object, +#' }} +#' \item{state}{string, } #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{nextRunAt}{string, The time of the next scheduled run.} +#' \item{timeZone}{string, The time zone of this import.} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -git_repos_post <- function(repo_url) { +imports_post <- function(name, sync_type, is_outbound, source = NULL, destination = NULL, schedule = NULL, notifications = NULL, parent_id = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/git_repos/" + path <- "/imports/" path_params <- list() query_params <- list() - body_params <- list(repoUrl = repo_url) + body_params <- list(name = name, syncType = sync_type, isOutbound = is_outbound, source = source, destination = destination, schedule = schedule, notifications = notifications, parentId = parent_id, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -7452,86 +8782,99 @@ git_repos_post <- function(repo_url) { } -#' Get a bookmarked git repository -#' @param id integer required. The ID for this git repository. +#' Initate an import of a tabular file into the platform +#' @param schema string required. The schema of the destination table. +#' @param name string required. The name of the destination table. +#' @param remote_host_id integer required. The id of the destination database host. +#' @param credential_id integer required. The id of the credentials to be used when performing the database import. +#' @param max_errors integer optional. The maximum number of rows with errors to remove from the import before failing. +#' @param existing_table_rows string optional. 
The behaviour if a table with the requested name already exists. One of "fail", "truncate", "append", or "drop".Defaults to "fail". +#' @param diststyle string optional. The diststyle to use for the table. One of "even", "all", or "key". +#' @param distkey string optional. The column to use as the distkey for the table. +#' @param sortkey1 string optional. The column to use as the sort key for the table. +#' @param sortkey2 string optional. The second column in a compound sortkey for the table. +#' @param column_delimiter string optional. The column delimiter of the file. If column_delimiter is null or omitted, it will be auto-detected. Valid arguments are "comma", "tab", and "pipe". +#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row is a header row. If first_row_is_header is null or omitted, it will be auto-detected. +#' @param multipart boolean optional. If true, the upload URI will require a `multipart/form-data` POST request. Defaults to false. +#' @param escaped boolean optional. If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this git repository.} -#' \item{repoUrl}{string, The URL for this git repository.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } +#' \item{id}{integer, The id of the import.} +#' \item{uploadUri}{string, The URI which may be used to upload a tabular file for import. You must use this URI to upload the file you wish imported and then inform the Civis API when your upload is complete using the URI given by the runUri field of this response.} +#' \item{runUri}{string, The URI to POST to once the file upload is complete. After uploading the file using the URI given in the uploadUri attribute of the response, POST to this URI to initiate the import of your uploaded file into the platform.} +#' \item{uploadFields}{list, If multipart was set to true, these fields should be included in the multipart upload.} #' @export -git_repos_get <- function(id) { +imports_post_files <- function(schema, name, remote_host_id, credential_id, max_errors = NULL, existing_table_rows = NULL, diststyle = NULL, distkey = NULL, sortkey1 = NULL, sortkey2 = NULL, column_delimiter = NULL, first_row_is_header = NULL, multipart = NULL, escaped = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/git_repos/{id}" - path_params <- list(id = id) + path <- "/imports/files" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(schema = schema, name = name, remoteHostId = remote_host_id, credentialId = credential_id, maxErrors = max_errors, existingTableRows = existing_table_rows, diststyle = diststyle, distkey = distkey, sortkey1 = sortkey1, sortkey2 = sortkey2, columnDelimiter = column_delimiter, firstRowIsHeader = first_row_is_header, multipart = multipart, escaped = escaped, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Remove the bookmark on a git repository -#' @param id integer required. The ID for this git repository. 
+#' Start a run +#' @param id integer required. The ID of the import. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{importId}{integer, The ID of the import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -git_repos_delete <- function(id) { +imports_post_files_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/git_repos/{id}" + path <- "/imports/files/{id}/runs" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List Groups -#' @param query string optional. If specified, it will filter the groups returned. Infix matching is supported (e.g., "query=group" will return "group" and "group of people" and "my group" and "my group of people"). -#' @param permission string optional. A permissions string, one of "read", "write", or "manage". Lists only groups for which the current user has that permission. -#' @param include_members boolean optional. Show members of the group. -#' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. +#' List runs for the given import +#' @param id integer required. The ID of the import. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name, created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{members}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} +#' \item{id}{integer, The ID of the run.} +#' \item{importId}{integer, The ID of the import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -groups_list <- function(query = NULL, permission = NULL, include_members = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +imports_list_files_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/" - path_params <- list() - query_params <- list(query = query, permission = permission, include_members = include_members, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/imports/files/{id}/runs" + path_params <- list(id = id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -7543,94 +8886,25 @@ groups_list <- function(query = NULL, permission = NULL, include_members = NULL, } -#' Create a Group -#' @param name string required. This group's name. -#' @param description string optional. The description of the group. -#' @param slug string optional. The slug for this group. -#' @param organization_id integer optional. The ID of the organization this group belongs to. -#' @param must_agree_to_eula boolean optional. Whether or not members of this group must sign the EULA. -#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. -#' @param role_ids array optional. An array of ids of all the roles this group has. -#' @param default_time_zone string optional. The default time zone of this group. -#' @param default_jobs_label string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_services_label string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. 
-#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{description}{string, The description of the group.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} -#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} -#' \item{roleIds}{array, An array of ids of all the roles this group has.} -#' \item{defaultTimeZone}{string, The default time zone of this group.} -#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{members}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' @export -groups_post <- function(name, description = NULL, slug = NULL, organization_id = NULL, must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { - - args <- as.list(match.call())[-1] - path <- "/groups/" - path_params <- list() - query_params <- list() - body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, mustAgreeToEula = must_agree_to_eula, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Get a Group -#' @param id integer required. +#' Check status of a run +#' @param id integer required. The ID of the import. +#' @param run_id integer required. The ID of the run. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{description}{string, The description of the group.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} -#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} -#' \item{roleIds}{array, An array of ids of all the roles this group has.} -#' \item{defaultTimeZone}{string, The default time zone of this group.} -#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{members}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} +#' \item{id}{integer, The ID of the run.} +#' \item{importId}{integer, The ID of the import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -groups_get <- function(id) { +imports_get_files_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/groups/{id}" - path_params <- list(id = id) + path <- "/imports/files/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -7643,278 +8917,347 @@ groups_get <- function(id) { } -#' Replace all attributes of this Group -#' @param id integer required. The ID of this group. -#' @param name string required. This group's name. -#' @param description string optional. The description of the group. -#' @param slug string optional. The slug for this group. -#' @param organization_id integer optional. The ID of the organization this group belongs to. -#' @param must_agree_to_eula boolean optional. Whether or not members of this group must sign the EULA. -#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. -#' @param role_ids array optional. An array of ids of all the roles this group has. 
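A run started this way can be polled with imports_get_files_runs() until it reaches a terminal state; the sketch below assumes the returned list exposes the documented fields and that import_id and run_id are placeholders from earlier calls:

# Illustrative only: import_id and run_id are placeholders,
# e.g. run <- imports_post_files_runs(import_id); run_id <- run$id.
status <- imports_get_files_runs(id = import_id, run_id = run_id)
while (status$state %in% c("queued", "running")) {
  Sys.sleep(10)  # wait between polls
  status <- imports_get_files_runs(id = import_id, run_id = run_id)
}
if (identical(status$state, "failed")) stop(status$error)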
-#' @param default_time_zone string optional. The default time zone of this group. -#' @param default_jobs_label string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_services_label string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. +#' Cancel a run +#' @param id integer required. The ID of the import. +#' @param run_id integer required. The ID of the run. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{description}{string, The description of the group.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} -#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} -#' \item{roleIds}{array, An array of ids of all the roles this group has.} -#' \item{defaultTimeZone}{string, The default time zone of this group.} -#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{members}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} +#' @return An empty HTTP response #' @export -groups_put <- function(id, name, description = NULL, slug = NULL, organization_id = NULL, must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { +imports_delete_files_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/groups/{id}" - path_params <- list(id = id) + path <- "/imports/files/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() - body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, mustAgreeToEula = must_agree_to_eula, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Update some attributes of this Group -#' @param id integer required. The ID of this group. -#' @param name string optional. This group's name. -#' @param description string optional. The description of the group. -#' @param slug string optional. The slug for this group. -#' @param organization_id integer optional. The ID of the organization this group belongs to. -#' @param must_agree_to_eula boolean optional. Whether or not members of this group must sign the EULA. -#' @param default_otp_required_for_login boolean optional. The two factor authentication requirement for this group. -#' @param role_ids array optional. An array of ids of all the roles this group has. -#' @param default_time_zone string optional. The default time zone of this group. -#' @param default_jobs_label string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_notebooks_label string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. -#' @param default_services_label string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future. +#' Get the logs for a run +#' @param id integer required. The ID of the import. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. 
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{description}{string, The description of the group.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} -#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} -#' \item{roleIds}{array, An array of ids of all the roles this group has.} -#' \item{defaultTimeZone}{string, The default time zone of this group.} -#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{members}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} #' @export -groups_patch <- function(id, name = NULL, description = NULL, slug = NULL, organization_id = NULL, must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, default_jobs_label = NULL, default_notebooks_label = NULL, default_services_label = NULL) { +imports_list_files_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, description = description, slug = slug, organizationId = organization_id, mustAgreeToEula = must_agree_to_eula, defaultOtpRequiredForLogin = default_otp_required_for_login, roleIds = role_ids, defaultTimeZone = default_time_zone, defaultJobsLabel = default_jobs_label, defaultNotebooksLabel = default_notebooks_label, defaultServicesLabel = default_services_label) + path <- "/imports/files/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Delete a Group (deprecated) -#' @param id integer required. +#' Get the logs for a run +#' @param id integer required. The ID of the import. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} #' @export -groups_delete <- function(id) { +imports_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/{id}" - path_params <- list(id = id) - query_params <- list() + path <- "/imports/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Create a CSV Import +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . 
A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' } +#' @param first_row_is_header boolean required. A boolean value indicating whether or not the first row of the source file is a header row. +#' @param name string optional. The name of the import. +#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param escaped boolean optional. A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. +#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". +#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". +#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. +#' @param table_columns array optional. An array containing the following fields: +#' \itemize{ +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. +#' } +#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. +#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. +#' @param redshift_destination_options list optional. A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". 
+#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' } +#' @param hidden boolean optional. The hidden status of the item. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{name}{string, The name of the import.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} +#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} +#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} +#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} +#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} +#' \item{tableColumns}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. 
#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} +#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} +#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -groups_list_shares <- function(id) { +imports_post_files_csv <- function(source, destination, first_row_is_header, name = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/{id}/shares" - path_params <- list(id = id) + path <- "/imports/files/csv" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(source = source, destination = destination, firstRowIsHeader = first_row_is_header, name = name, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get a CSV Import +#' @param id integer required. 
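For the CSV import endpoint just defined, a minimal call might look like the sketch below; the file, host, and credential IDs are placeholders, and the camelCase element names simply follow the field lists documented above:

# Illustrative only: all identifiers are hypothetical.
csv_import <- imports_post_files_csv(
  source = list(fileIds = list(123456L)),   # a hypothetical Civis file ID
  destination = list(
    schema = "scratch",                     # hypothetical destination schema
    table = "example_table",                # hypothetical destination table
    remoteHostId = 10L,                     # hypothetical database host ID
    credentialId = 20L                      # hypothetical credential ID
  ),
  first_row_is_header = TRUE,
  column_delimiter = "comma",
  existing_table_rows = "truncate"
)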
#' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{name}{string, The name of the import.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} +#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} +#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} +#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} +#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} +#' \item{tableColumns}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} +#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} +#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -groups_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +imports_get_files_csv <- function(id) { args <- as.list(match.call())[-1] - path <- "/groups/{id}/shares/users" + path <- "/imports/files/csv/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. -#' -#' @return An empty HTTP response -#' @export -groups_delete_shares_users <- function(id, user_id) { - - args <- as.list(match.call())[-1] - path <- "/groups/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) - query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Replace all attributes of this CSV Import +#' @param id integer required. The ID for the import. +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). 
+#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' } +#' @param first_row_is_header boolean required. A boolean value indicating whether or not the first row of the source file is a header row. +#' @param name string optional. The name of the import. +#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param escaped boolean optional. A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. +#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". +#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". +#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. +#' @param table_columns array optional. An array containing the following fields: +#' \itemize{ +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. +#' } +#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. +#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. +#' @param redshift_destination_options list optional. A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". 
+#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' } #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{name}{string, The name of the import.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} +#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} +#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} +#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} +#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} +#' \item{tableColumns}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. 
For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} +#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} +#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -groups_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +imports_put_files_csv <- function(id, source, destination, first_row_is_header, name = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/{id}/shares/groups" + path <- "/imports/files/csv/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(source = source, destination = destination, firstRowIsHeader = first_row_is_header, name = name, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -7925,86 +9268,119 @@ groups_put_shares_groups <- function(id, group_ids, permission_level, share_emai } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. 
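Because the PUT endpoint replaces every attribute of a CSV import, one way to change a single setting is to fetch the current configuration and re-supply the required fields, as in this sketch (42L is a placeholder import ID, and the fetched list is assumed to expose the documented return fields):

# Illustrative only: 42L is a hypothetical import ID.
cfg <- imports_get_files_csv(id = 42L)
imports_put_files_csv(
  id = cfg$id,
  source = cfg$source,
  destination = cfg$destination,
  first_row_is_header = cfg$firstRowIsHeader,
  existing_table_rows = "append"   # switch the import to append mode
)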
-#' -#' @return An empty HTTP response -#' @export -groups_delete_shares_groups <- function(id, group_id) { - - args <- as.list(match.call())[-1] - path <- "/groups/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Add a user to a group -#' @param id integer required. The ID of the group. -#' @param user_id integer required. The ID of the user. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of this group.} -#' \item{name}{string, This group's name.} -#' \item{createdAt}{string, The date and time when this group was created.} -#' \item{description}{string, The description of the group.} -#' \item{slug}{string, The slug for this group.} -#' \item{organizationId}{integer, The ID of the organization this group belongs to.} -#' \item{organizationName}{string, The name of the organization this group belongs to.} -#' \item{memberCount}{integer, The total number of members in this group.} -#' \item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} -#' \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} -#' \item{roleIds}{array, An array of ids of all the roles this group has.} -#' \item{defaultTimeZone}{string, The default time zone of this group.} -#' \item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -#' \item{members}{array, An array containing the following fields: +#' Update some attributes of this CSV Import +#' @param id integer required. The ID for the import. +#' @param name string optional. The name of the import. +#' @param source list optional. A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). 
+#' } +#' } +#' @param destination list optional. A list containing the following elements: +#' \itemize{ +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' } +#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row of the source file is a header row. +#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". +#' @param escaped boolean optional. A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. +#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". +#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". +#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. +#' @param table_columns array optional. An array containing the following fields: +#' \itemize{ +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. +#' } +#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. +#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. +#' @param redshift_destination_options list optional. A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' } +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{name}{string, The name of the import.} +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. 
+#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). +#' } +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' }} +#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} +#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} +#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} +#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} +#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} +#' \item{tableColumns}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. +#' }} +#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} +#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} +#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. #' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' @export -groups_put_members <- function(id, user_id) { +imports_patch_files_csv <- function(id, name = NULL, source = NULL, destination = NULL, first_row_is_header = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL) { args <- as.list(match.call())[-1] - path <- "/groups/{id}/members/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/imports/files/csv/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, source = source, destination = destination, firstRowIsHeader = first_row_is_header, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Remove a user from a group -#' @param id integer required. The ID of the group. -#' @param user_id integer required. The ID of the user. +#' Archive a CSV Import (deprecated, use archiving endpoints instead) +#' @param id integer required. #' #' @return An empty HTTP response #' @export -groups_delete_members <- function(id, user_id) { +imports_delete_files_csv <- function(id) { args <- as.list(match.call())[-1] - path <- "/groups/{id}/members/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/imports/files/csv/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -8017,165 +9393,177 @@ groups_delete_members <- function(id, user_id) { } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{name}{string, The name of the import.} +#' \item{source}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). +#' \item storagePath list . A list containing the following elements: +#' \itemize{ +#' \item storageHostId integer, The ID of the source storage host. +#' \item credentialId integer, The ID of the credentials for the source storage host. +#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). 
+#' } #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{destination}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item schema string, The destination schema name. +#' \item table string, The destination table name. +#' \item remoteHostId integer, The ID of the destination database host. +#' \item credentialId integer, The ID of the credentials for the destination database. +#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. +#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} +#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} +#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} +#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} +#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} +#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} +#' \item{tableColumns}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item name string, The column name. +#' \item sqlType string, The SQL type of the column. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} +#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} +#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". +#' \item distkey string, Distkey for this table in Redshift +#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. 
+#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -imports_list_shares <- function(id) { +imports_put_files_csv_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/shares" + path <- "/imports/files/csv/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Start a run +#' @param id integer required. The ID of the csv_import. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{id}{integer, The ID of the run.} +#' \item{csvImportId}{integer, The ID of the csv_import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -imports_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +imports_post_files_csv_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/shares/users" + path <- "/imports/files/csv/{id}/runs" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' List runs for the given csv_import +#' @param id integer required. The ID of the csv_import. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
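+#'
+#' @examples
+#' \dontrun{
+#' # Illustrative sketch (hand-written, not generated from the API spec):
+#' # assumes an existing CSV import; 123 is a hypothetical import ID.
+#' imports_post_files_csv_runs(123)
+#' imports_list_files_csv_runs(123, limit = 5, order_dir = "desc")
+#' }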
#' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the run.} +#' \item{csvImportId}{integer, The ID of the csv_import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -imports_delete_shares_users <- function(id, user_id) { +imports_list_files_csv_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) - query_params <- list() + path <- "/imports/files/csv/{id}/runs" + path_params <- list(id = id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Check status of a run +#' @param id integer required. The ID of the csv_import. +#' @param run_id integer required. The ID of the run. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{id}{integer, The ID of the run.} +#' \item{csvImportId}{integer, The ID of the csv_import.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -imports_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +imports_get_files_csv_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/shares/groups" - path_params <- list(id = id) + path <- "/imports/files/csv/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' Cancel a run +#' @param id integer required. The ID of the csv_import. +#' @param run_id integer required. The ID of the run. #' #' @return An empty HTTP response #' @export -imports_delete_shares_groups <- function(id, group_id) { +imports_delete_files_csv_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) + path <- "/imports/files/csv/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -8188,41 +9576,58 @@ imports_delete_shares_groups <- function(id, group_id) { } -#' List the projects an Import belongs to -#' @param id integer required. The ID of the Import. +#' Get the logs for a run +#' @param id integer required. The ID of the csv_import. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} +#' @export +imports_list_files_csv_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { + + args <- as.list(match.call())[-1] + path <- "/imports/files/csv/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List batch imports #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, created_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{id}{integer, The ID for the import.} +#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} +#' \item{table}{string, The destination table name, without the schema prefix. 
This table must already exist in Redshift.} +#' \item{remoteHostId}{integer, The ID of the destination database host.} +#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} +#' \item{startedAt}{string, The time the last run started at.} +#' \item{finishedAt}{string, The time the last run completed.} +#' \item{error}{string, The error returned by the run, if any.} #' @export -imports_list_projects <- function(id, hidden = NULL) { +imports_list_batches <- function(hidden = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/projects" - path_params <- list(id = id) - query_params <- list(hidden = hidden) + path <- "/imports/batches" + path_params <- list() + query_params <- list(hidden = hidden, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -8234,55 +9639,78 @@ imports_list_projects <- function(id, hidden = NULL) { } -#' Add an Import to a project -#' @param id integer required. The ID of the Import. -#' @param project_id integer required. The ID of the project. +#' Upload multiple files to Civis +#' @param file_ids array required. The file IDs for the import. +#' @param schema string required. The destination schema name. This schema must already exist in Redshift. +#' @param table string required. The destination table name, without the schema prefix. This table must already exist in Redshift. +#' @param remote_host_id integer required. The ID of the destination database host. +#' @param credential_id integer required. The ID of the credentials to be used when performing the database import. +#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". If unspecified, defaults to "comma". +#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row is a header row. If unspecified, defaults to false. +#' @param compression string optional. The type of compression. Valid arguments are "gzip", "zip", and "none". If unspecified, defaults to "gzip". +#' @param hidden boolean optional. The hidden status of the item. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} +#' \item{table}{string, The destination table name, without the schema prefix. 
This table must already exist in Redshift.} +#' \item{remoteHostId}{integer, The ID of the destination database host.} +#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} +#' \item{startedAt}{string, The time the last run started at.} +#' \item{finishedAt}{string, The time the last run completed.} +#' \item{error}{string, The error returned by the run, if any.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -imports_put_projects <- function(id, project_id) { +imports_post_batches <- function(file_ids, schema, table, remote_host_id, credential_id, column_delimiter = NULL, first_row_is_header = NULL, compression = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/imports/batches" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(fileIds = file_ids, schema = schema, table = table, remoteHostId = remote_host_id, credentialId = credential_id, columnDelimiter = column_delimiter, firstRowIsHeader = first_row_is_header, compression = compression, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Remove an Import from a project -#' @param id integer required. The ID of the Import. -#' @param project_id integer required. The ID of the project. +#' Get details about a batch import +#' @param id integer required. The ID for the import. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the import.} +#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} +#' \item{table}{string, The destination table name, without the schema prefix. This table must already exist in Redshift.} +#' \item{remoteHostId}{integer, The ID of the destination database host.} +#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} +#' \item{startedAt}{string, The time the last run started at.} +#' \item{finishedAt}{string, The time the last run completed.} +#' \item{error}{string, The error returned by the run, if any.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -imports_delete_projects <- function(id, project_id) { +imports_get_batches <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/imports/batches/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Get details about an import +#' @param id integer required. 
The ID for the import. #' #' @return A list containing the following elements: #' \item{name}{string, The name of the import.} @@ -8304,10 +9732,11 @@ imports_delete_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -8365,94 +9794,14 @@ imports_delete_projects <- function(id, project_id) { #' \item{timeZone}{string, The time zone of this import.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -imports_put_archive <- function(id, status) { +imports_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/archive" + path <- "/imports/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' List Imports -#' @param type string optional. If specified, return imports of these types. It accepts a comma-separated list, possible values are 'AutoImport', 'DbSync', 'Salesforce', 'GdocImport'. -#' @param author string optional. If specified, return imports from this author. It accepts a comma-separated list of author ids. -#' @param destination string optional. If specified, returns imports with one of these destinations. It accepts a comma-separated list of remote host ids. -#' @param source string optional. If specified, returns imports with one of these sources. It accepts a comma-separated list of remote host ids. 'DbSync' must be specified for 'type'. -#' @param status string optional. If specified, returns imports with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. -#' @param order_dir string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. -#' -#' @return An array containing the following fields: -#' \item{name}{string, The name of the import.} -#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{id}{integer, The ID for the import.} -#' \item{isOutbound}{boolean, } -#' \item{jobType}{string, The job type of this import.} -#' \item{state}{string, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} -#' \item{timeZone}{string, The time zone of this import.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' @export -imports_list <- function(type = NULL, author = NULL, destination = NULL, source = NULL, status = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { - - args <- as.list(match.call())[-1] - path <- "/imports/" - path_params <- list() - query_params <- list(type = type, author = author, destination = destination, source = source, status = status, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -8464,7 +9813,8 @@ imports_list <- function(type = NULL, author = NULL, destination = NULL, source } -#' Create a new import configuration +#' Update an import +#' @param id integer required. The ID for the import. #' @param name string required. The name of the import. #' @param sync_type string required. The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce. #' @param is_outbound boolean required. @@ -8483,10 +9833,11 @@ imports_list <- function(type = NULL, author = NULL, destination = NULL, source #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -8504,7 +9855,6 @@ imports_list <- function(type = NULL, author = NULL, destination = NULL, source #' @param parent_id integer optional. Parent id to trigger this import from #' @param next_run_at string optional. The time of the next scheduled run. #' @param time_zone string optional. The time zone of this import. -#' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: #' \item{name}{string, The name of the import.} @@ -8526,10 +9876,11 @@ imports_list <- function(type = NULL, author = NULL, destination = NULL, source #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -8587,54 +9938,66 @@ imports_list <- function(type = NULL, author = NULL, destination = NULL, source #' \item{timeZone}{string, The time zone of this import.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -imports_post <- function(name, sync_type, is_outbound, source = NULL, destination = NULL, schedule = NULL, notifications = NULL, parent_id = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL) { +imports_put <- function(id, name, sync_type, is_outbound, source = NULL, destination = NULL, schedule = NULL, notifications = NULL, parent_id = NULL, next_run_at = NULL, time_zone = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/" - path_params <- list() + path <- "/imports/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, syncType = sync_type, isOutbound = is_outbound, source = source, destination = destination, schedule = schedule, notifications = notifications, parentId = parent_id, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden) + body_params <- list(name = name, syncType = sync_type, isOutbound = is_outbound, source = source, destination = destination, schedule = schedule, notifications = notifications, parentId = parent_id, nextRunAt = next_run_at, timeZone = time_zone) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Initate an import of a tabular file into the platform -#' @param schema string required. The schema of the destination table. -#' @param name string required. The name of the destination table. -#' @param remote_host_id integer required. The id of the destination database host. -#' @param credential_id integer required. The id of the credentials to be used when performing the database import. -#' @param max_errors integer optional. The maximum number of rows with errors to remove from the import before failing. -#' @param existing_table_rows string optional. The behaviour if a table with the requested name already exists. One of "fail", "truncate", "append", or "drop".Defaults to "fail". -#' @param diststyle string optional. The diststyle to use for the table. One of "even", "all", or "key". -#' @param distkey string optional. The column to use as the distkey for the table. -#' @param sortkey1 string optional. The column to use as the sort key for the table. -#' @param sortkey2 string optional. The second column in a compound sortkey for the table. -#' @param column_delimiter string optional. The column delimiter of the file. If column_delimiter is null or omitted, it will be auto-detected. Valid arguments are "comma", "tab", and "pipe". -#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row is a header row. 
If first_row_is_header is null or omitted, it will be auto-detected. -#' @param multipart boolean optional. If true, the upload URI will require a `multipart/form-data` POST request. Defaults to false. -#' @param escaped boolean optional. If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. -#' @param hidden boolean optional. The hidden status of the item. +#' Get the run history of this import +#' @param id integer required. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' @export +imports_list_runs <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/imports/{id}/runs" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Run an import +#' @param id integer required. The ID of the import to run. #' #' @return A list containing the following elements: -#' \item{id}{integer, The id of the import.} -#' \item{uploadUri}{string, The URI which may be used to upload a tabular file for import. You must use this URI to upload the file you wish imported and then inform the Civis API when your upload is complete using the URI given by the runUri field of this response.} -#' \item{runUri}{string, The URI to POST to once the file upload is complete. After uploading the file using the URI given in the uploadUri attribute of the response, POST to this URI to initiate the import of your uploaded file into the platform.} -#' \item{uploadFields}{list, If multipart was set to true, these fields should be included in the multipart upload.} +#' \item{runId}{integer, The ID of the new run triggered.} #' @export -imports_post_files <- function(schema, name, remote_host_id, credential_id, max_errors = NULL, existing_table_rows = NULL, diststyle = NULL, distkey = NULL, sortkey1 = NULL, sortkey2 = NULL, column_delimiter = NULL, first_row_is_header = NULL, multipart = NULL, escaped = NULL, hidden = NULL) { +imports_post_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/files" - path_params <- list() + path <- "/imports/{id}/runs" + path_params <- list(id = id) query_params <- list() - body_params <- list(schema = schema, name = name, remoteHostId = remote_host_id, credentialId = credential_id, maxErrors = max_errors, existingTableRows = existing_table_rows, diststyle = diststyle, distkey = distkey, sortkey1 = sortkey1, sortkey2 = sortkey2, columnDelimiter = column_delimiter, firstRowIsHeader = first_row_is_header, multipart = multipart, escaped = escaped, hidden = hidden) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -8645,22 +10008,18 @@ imports_post_files <- function(schema, name, remote_host_id, credential_id, max_ } -#' Start a run -#' @param id integer required. The ID of the import. 
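+# Illustrative sketch (hand-written, not generated from the API spec): one
+# possible trigger-then-cancel flow using the import run endpoints defined in
+# this file; 123 stands in for a real import ID.
+#   run <- imports_post_runs(123)   # returns a list containing the new runId
+#   imports_post_cancel(123)        # request cancellation of the running job
+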
+#' Cancel a run +#' @param id integer required. The ID of the job. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the run.} -#' \item{importId}{integer, The ID of the import.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} #' @export -imports_post_files_runs <- function(id) { +imports_post_cancel <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/files/{id}/runs" + path <- "/imports/{id}/cancel" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -8674,80 +10033,356 @@ imports_post_files_runs <- function(id) { } -#' List runs for the given import -#' @param id integer required. The ID of the import. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Create a sync +#' @param id integer required. +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item file list . +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' \item salesforce list . A list containing the following elements: +#' \itemize{ +#' \item objectName string, The Salesforce object name. +#' } +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. 
+#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' } +#' @param advanced_options list optional. A list containing the following elements: +#' \itemize{ +#' \item maxErrors integer, +#' \item existingTableRows string, +#' \item diststyle string, +#' \item distkey string, +#' \item sortkey1 string, +#' \item sortkey2 string, +#' \item columnDelimiter string, +#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. +#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' \item identityColumn string, +#' \item rowChunkSize integer, +#' \item wipeDestinationTable boolean, +#' \item truncateLongLines boolean, +#' \item invalidCharReplacement string, +#' \item verifyTableRowCounts boolean, +#' \item partitionColumnName string, This parameter is deprecated +#' \item partitionSchemaName string, This parameter is deprecated +#' \item partitionTableName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated +#' \item lastModifiedColumn string, +#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. +#' \item chunkingMethod string, This parameter is deprecated +#' \item firstRowIsHeader boolean, +#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" +#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. +#' \item contactLists string, +#' \item soqlQuery string, +#' \item includeDeletedRecords boolean, +#' } #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the run.} -#' \item{importId}{integer, The ID of the import.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of the table or file, if available. +#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. 
This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item file list . A list containing the following elements: +#' \itemize{ +#' \item id integer, The file id. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' \item salesforce list . A list containing the following elements: +#' \itemize{ +#' \item objectName string, The Salesforce object name. +#' } +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' }} +#' \item{advancedOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item maxErrors integer, +#' \item existingTableRows string, +#' \item diststyle string, +#' \item distkey string, +#' \item sortkey1 string, +#' \item sortkey2 string, +#' \item columnDelimiter string, +#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. +#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' \item identityColumn string, +#' \item rowChunkSize integer, +#' \item wipeDestinationTable boolean, +#' \item truncateLongLines boolean, +#' \item invalidCharReplacement string, +#' \item verifyTableRowCounts boolean, +#' \item partitionColumnName string, This parameter is deprecated +#' \item partitionSchemaName string, This parameter is deprecated +#' \item partitionTableName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated +#' \item lastModifiedColumn string, +#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. 
+#' \item chunkingMethod string, This parameter is deprecated +#' \item firstRowIsHeader boolean, +#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" +#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. +#' \item contactLists string, +#' \item soqlQuery string, +#' \item includeDeletedRecords boolean, +#' }} #' @export -imports_list_files_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +imports_post_syncs <- function(id, source, destination, advanced_options = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/{id}/runs" + path <- "/imports/{id}/syncs" path_params <- list(id = id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() + query_params <- list() + body_params <- list(source = source, destination = destination, advancedOptions = advanced_options) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Check status of a run -#' @param id integer required. The ID of the import. -#' @param run_id integer required. The ID of the run. +#' Update a sync +#' @param id integer required. The ID of the import to fetch. +#' @param sync_id integer required. The ID of the sync to fetch. +#' @param source list required. A list containing the following elements: +#' \itemize{ +#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item file list . +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' \item salesforce list . A list containing the following elements: +#' \itemize{ +#' \item objectName string, The Salesforce object name. +#' } +#' } +#' @param destination list required. A list containing the following elements: +#' \itemize{ +#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". 
This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' } +#' @param advanced_options list optional. A list containing the following elements: +#' \itemize{ +#' \item maxErrors integer, +#' \item existingTableRows string, +#' \item diststyle string, +#' \item distkey string, +#' \item sortkey1 string, +#' \item sortkey2 string, +#' \item columnDelimiter string, +#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. +#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' \item identityColumn string, +#' \item rowChunkSize integer, +#' \item wipeDestinationTable boolean, +#' \item truncateLongLines boolean, +#' \item invalidCharReplacement string, +#' \item verifyTableRowCounts boolean, +#' \item partitionColumnName string, This parameter is deprecated +#' \item partitionSchemaName string, This parameter is deprecated +#' \item partitionTableName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated +#' \item lastModifiedColumn string, +#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. +#' \item chunkingMethod string, This parameter is deprecated +#' \item firstRowIsHeader boolean, +#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" +#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. 
+#' \item contactLists string, +#' \item soqlQuery string, +#' \item includeDeletedRecords boolean, +#' } #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{importId}{integer, The ID of the import.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, } +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of the table or file, if available. +#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item file list . A list containing the following elements: +#' \itemize{ +#' \item id integer, The file id. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' \item salesforce list . A list containing the following elements: +#' \itemize{ +#' \item objectName string, The Salesforce object name. +#' } +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' }} +#' \item{advancedOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item maxErrors integer, +#' \item existingTableRows string, +#' \item diststyle string, +#' \item distkey string, +#' \item sortkey1 string, +#' \item sortkey2 string, +#' \item columnDelimiter string, +#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. 
+#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' \item identityColumn string, +#' \item rowChunkSize integer, +#' \item wipeDestinationTable boolean, +#' \item truncateLongLines boolean, +#' \item invalidCharReplacement string, +#' \item verifyTableRowCounts boolean, +#' \item partitionColumnName string, This parameter is deprecated +#' \item partitionSchemaName string, This parameter is deprecated +#' \item partitionTableName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated +#' \item lastModifiedColumn string, +#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. +#' \item chunkingMethod string, This parameter is deprecated +#' \item firstRowIsHeader boolean, +#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" +#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. +#' \item contactLists string, +#' \item soqlQuery string, +#' \item includeDeletedRecords boolean, +#' }} #' @export -imports_get_files_runs <- function(id, run_id) { +imports_put_syncs <- function(id, sync_id, source, destination, advanced_options = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/imports/{id}/syncs/{sync_id}" + path_params <- list(id = id, sync_id = sync_id) query_params <- list() - body_params <- list() + body_params <- list(source = source, destination = destination, advancedOptions = advanced_options) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Cancel a run -#' @param id integer required. The ID of the import. -#' @param run_id integer required. The ID of the run. +#' Archive a sync (deprecated, use the /archive endpoint instead) +#' @param id integer required. The ID of the import to fetch. +#' @param sync_id integer required. The ID of the sync to fetch. #' #' @return An empty HTTP response #' @export -imports_delete_files_runs <- function(id, run_id) { +imports_delete_syncs <- function(id, sync_id) { args <- as.list(match.call())[-1] - path <- "/imports/files/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/imports/{id}/syncs/{sync_id}" + path_params <- list(id = id, sync_id = sync_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -8760,53 +10395,162 @@ imports_delete_files_runs <- function(id, run_id) { } -#' Get the logs for a run -#' @param id integer required. The ID of the import. -#' @param run_id integer required. The ID of the run. 
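A rough sketch of how the new sync wrappers defined above (imports_post_syncs(), imports_put_syncs()) might be called; the import ID, schema names, table names, and the existingTableRows value are placeholders chosen purely for illustration.

    # Hypothetical IDs and table names, used only to show the call shape.
    src  <- list(databaseTable = list(schema = "staging",   table = "contacts"))
    dest <- list(databaseTable = list(schema = "analytics", table = "contacts"))
    new_sync <- imports_post_syncs(id = 123, source = src, destination = dest)
    imports_put_syncs(id = 123, sync_id = new_sync$id,
                      source = src, destination = dest,
                      advanced_options = list(existingTableRows = "truncate"))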
-#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' Update the archive status of this sync +#' @param id integer required. The ID of the import to fetch. +#' @param sync_id integer required. The ID of the sync to fetch. +#' @param status boolean optional. The desired archived status of the sync. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} -#' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{source}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of the table or file, if available. +#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item file list . A list containing the following elements: +#' \itemize{ +#' \item id integer, The file id. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. +#' } +#' \item salesforce list . A list containing the following elements: +#' \itemize{ +#' \item objectName string, The Salesforce object name. +#' } +#' }} +#' \item{destination}{list, A list containing the following elements: +#' \itemize{ +#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet +#' \item databaseTable list . A list containing the following elements: +#' \itemize{ +#' \item schema string, The database schema name. +#' \item table string, The database table name. +#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. +#' } +#' \item googleWorksheet list . A list containing the following elements: +#' \itemize{ +#' \item spreadsheet string, The spreadsheet document name. +#' \item spreadsheetId string, The spreadsheet document id. +#' \item worksheet string, The worksheet tab name. +#' \item worksheetId integer, The worksheet tab id. 
+#' } +#' }} +#' \item{advancedOptions}{list, A list containing the following elements: +#' \itemize{ +#' \item maxErrors integer, +#' \item existingTableRows string, +#' \item diststyle string, +#' \item distkey string, +#' \item sortkey1 string, +#' \item sortkey2 string, +#' \item columnDelimiter string, +#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. +#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. +#' \item identityColumn string, +#' \item rowChunkSize integer, +#' \item wipeDestinationTable boolean, +#' \item truncateLongLines boolean, +#' \item invalidCharReplacement string, +#' \item verifyTableRowCounts boolean, +#' \item partitionColumnName string, This parameter is deprecated +#' \item partitionSchemaName string, This parameter is deprecated +#' \item partitionTableName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated +#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated +#' \item lastModifiedColumn string, +#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. +#' \item chunkingMethod string, This parameter is deprecated +#' \item firstRowIsHeader boolean, +#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" +#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. +#' \item contactLists string, +#' \item soqlQuery string, +#' \item includeDeletedRecords boolean, +#' }} #' @export -imports_list_files_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +imports_put_syncs_archive <- function(id, sync_id, status = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/{id}/runs/{run_id}/logs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(last_id = last_id, limit = limit) - body_params <- list() + path <- "/imports/{id}/syncs/{sync_id}/archive" + path_params <- list(id = id, sync_id = sync_id) + query_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get the logs for a run -#' @param id integer required. The ID of the import. -#' @param run_id integer required. The ID of the run. -#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' List Jobs +#' @param state string optional. The job's state. 
One or more of queued, running, succeeded, failed, and cancelled. Specify multiple values as a comma-separated list (e.g., "A,B"). +#' @param type string optional. The job's type. Specify multiple values as a comma-separated list (e.g., "A,B"). +#' @param q string optional. Query string to search on the id, name, and job type. +#' @param permission string optional. A permissions string, one of "read", "write", or "manage". Lists only jobs for which the current user has that permission. +#' @param scheduled boolean optional. If the item is scheduled. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} -#' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} +#' \item{id}{integer, } +#' \item{name}{string, } +#' \item{type}{string, } +#' \item{fromTemplateId}{integer, } +#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
+#' }} #' @export -imports_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +jobs_list <- function(state = NULL, type = NULL, q = NULL, permission = NULL, scheduled = NULL, hidden = NULL, archived = NULL, author = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/runs/{run_id}/logs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(last_id = last_id, limit = limit) + path <- "/jobs/" + path_params <- list() + query_params <- list(state = state, type = type, q = q, permission = permission, scheduled = scheduled, hidden = hidden, archived = archived, author = author, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -8818,160 +10562,433 @@ imports_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { } -#' Create a CSV Import -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: +#' Show basic job info +#' @param id integer required. The ID for this job. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{name}{string, } +#' \item{type}{string, } +#' \item{fromTemplateId}{integer, } +#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' } -#' @param destination list required. A list containing the following elements: +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. 
-#' } -#' @param first_row_is_header boolean required. A boolean value indicating whether or not the first row of the source file is a header row. -#' @param name string optional. The name of the import. -#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". -#' @param escaped boolean optional. A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. -#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". -#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". -#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. -#' @param table_columns array optional. An array containing the following fields: +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. -#' } -#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. -#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. -#' @param redshift_destination_options list optional. A list containing the following elements: +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{successEmailSubject}{string, } +#' \item{successEmailBody}{string, } +#' \item{runningAsUser}{string, } +#' \item{runByUser}{string, } +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. -#' } -#' @param hidden boolean optional. The hidden status of the item. +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' @export +jobs_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Generate and retrieve trigger email address +#' @param id integer required. The ID for this job. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{name}{string, The name of the import.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: +#' \item{triggerEmail}{string, Email address which may be used to trigger this job to run.} +#' @export +jobs_post_trigger_email <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/trigger_email" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Show chain of parents as a list that this job triggers from +#' @param id integer required. The ID for this job. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{name}{string, } +#' \item{type}{string, } +#' \item{fromTemplateId}{integer, } +#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{destination}{list, A list containing the following elements: +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. 
-#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} -#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} -#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} -#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} -#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} -#' \item{tableColumns}{array, An array containing the following fields: +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} -#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} -#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +#' \item{successEmailSubject}{string, } +#' \item{successEmailBody}{string, } +#' \item{runningAsUser}{string, } +#' \item{runByUser}{string, } +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{hidden}{boolean, The hidden status of the item.} #' @export -imports_post_files_csv <- function(source, destination, first_row_is_header, name = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL, hidden = NULL) { +jobs_list_parents <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv" - path_params <- list() + path <- "/jobs/{id}/parents" + path_params <- list(id = id) query_params <- list() - body_params <- list(source = source, destination = destination, firstRowIsHeader = first_row_is_header, name = name, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options, hidden = hidden) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Get a CSV Import -#' @param id integer required. +#' Show nested tree of children that this job triggers +#' @param id integer required. The ID for this job. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{name}{string, The name of the import.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: +#' \item{id}{integer, } +#' \item{name}{string, } +#' \item{type}{string, } +#' \item{fromTemplateId}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. 
the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{destination}{list, A list containing the following elements: +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} -#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} -#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} -#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} -#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} -#' \item{tableColumns}{array, An array containing the following fields: +#' \item{children}{array, } +#' @export +jobs_list_children <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/children" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List runs for the given job +#' @param id integer required. The ID for this job. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. 
Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' @export +jobs_list_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs" + path_params <- list(id = id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Run a job +#' @param id integer required. The ID for this job. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' @export +jobs_post_runs <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Check status of a job +#' @param id integer required. The ID of the Job. +#' @param run_id integer required. The ID of the Run. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, } +#' \item{state}{string, } +#' \item{createdAt}{string, The time that the run was queued.} +#' \item{startedAt}{string, The time that the run started.} +#' \item{finishedAt}{string, The time that the run completed.} +#' \item{error}{string, The error message for this run, if present.} +#' @export +jobs_get_runs <- function(id, run_id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Cancel a run +#' @param id integer required. The ID of the Job. +#' @param run_id integer required. The ID of the Run. 
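A minimal, not-run sketch of the new /jobs run wrappers defined above; the job ID is a placeholder, and the fields read off the responses (id, state) are the ones listed in the documented return values.

    run    <- jobs_post_runs(id = 123)                  # queue a run for job 123
    status <- jobs_get_runs(id = 123, run_id = run$id)  # check its status
    status$state                                        # e.g. "queued", "running", "succeeded"
    recent <- jobs_list_runs(id = 123, limit = 5, order_dir = "desc")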
+#' +#' @return An empty HTTP response +#' @export +jobs_delete_runs <- function(id, run_id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List the outputs for a run +#' @param id integer required. The ID of the job. +#' @param run_id integer required. The ID of the run. +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, The type of the output. Valid values are File, Table, Report, Project, Credential, or JSONValue} +#' \item{objectId}{integer, The ID of the output.} +#' \item{name}{string, The name of the output.} +#' \item{link}{string, The hypermedia link to the output.} +#' \item{value}{string, } +#' @export +jobs_list_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs/{run_id}/outputs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the logs for a run +#' @param id integer required. The ID of the job. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} +#' @export +jobs_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List the workflows a job belongs to +#' @param id integer required. +#' @param archived string optional. The archival status of the requested item(s). +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this workflow.} +#' \item{name}{string, The name of this workflow.} +#' \item{description}{string, A description of the workflow.} +#' \item{valid}{boolean, The validity of the workflow definition.} +#' \item{fileId}{string, The file id for the s3 file containing the workflow configuration.} +#' \item{user}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} -#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} -#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \item{state}{string, The state of the workflow. State is "running" if any execution is running, otherwise reflects most recent execution state.} +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
#' }} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} +#' \item{timeZone}{string, The time zone of this workflow.} +#' \item{nextExecutionAt}{string, The time of the next scheduled execution.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export -imports_get_files_csv <- function(id) { +jobs_list_workflows <- function(id, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv/{id}" + path <- "/jobs/{id}/workflows" path_params <- list(id = id) - query_params <- list() + query_params <- list(archived = archived) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -8983,220 +11000,99 @@ imports_get_files_csv <- function(id) { } -#' Replace all attributes of this CSV Import -#' @param id integer required. The ID for the import. -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' } -#' @param destination list required. A list containing the following elements: -#' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. -#' } -#' @param first_row_is_header boolean required. A boolean value indicating whether or not the first row of the source file is a header row. -#' @param name string optional. The name of the import. -#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". -#' @param escaped boolean optional. A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. -#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". -#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". 
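The run-introspection wrappers above compose in the same way; a hedged sketch with placeholder job and run IDs:

    outputs <- jobs_list_runs_outputs(id = 123, run_id = 456)            # outputs attached to the run
    logs    <- jobs_list_runs_logs(id = 123, run_id = 456, limit = 100)  # most recent log messages
    wflows  <- jobs_list_workflows(id = 123)                             # workflows this job belongs to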
-#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. -#' @param table_columns array optional. An array containing the following fields: -#' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. -#' } -#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. -#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. -#' @param redshift_destination_options list optional. A list containing the following elements: -#' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. -#' } +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{name}{string, The name of the import.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. 
+#' \item users array, +#' \item groups array, #' }} -#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} -#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} -#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} -#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} -#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} -#' \item{tableColumns}{array, An array containing the following fields: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. +#' \item users array, +#' \item groups array, #' }} -#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} -#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} -#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -imports_put_files_csv <- function(id, source, destination, first_row_is_header, name = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL) { +jobs_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv/{id}" + path <- "/jobs/{id}/shares" path_params <- list(id = id) query_params <- list() - body_params <- list(source = source, destination = destination, firstRowIsHeader = first_row_is_header, name = name, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update some attributes of this CSV Import -#' @param id integer required. The ID for the import. -#' @param name string optional. The name of the import. -#' @param source list optional. A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' } -#' @param destination list optional. A list containing the following elements: -#' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. -#' } -#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row of the source file is a header row. -#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma". -#' @param escaped boolean optional. 
A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false. -#' @param compression string optional. The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none". -#' @param existing_table_rows string optional. The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail". -#' @param max_errors integer optional. The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases. -#' @param table_columns array optional. An array containing the following fields: -#' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. -#' } -#' @param loosen_types boolean optional. If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false. -#' @param execution string optional. In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports. -#' @param redshift_destination_options list optional. A list containing the following elements: -#' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. -#' } +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{name}{string, The name of the import.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. -#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. 
-#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' \item users array, +#' \item groups array, #' }} -#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} -#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} -#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} -#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} -#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} -#' \item{tableColumns}{array, An array containing the following fields: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. +#' \item users array, +#' \item groups array, #' }} -#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} -#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} -#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". -#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -imports_patch_files_csv <- function(id, name = NULL, source = NULL, destination = NULL, first_row_is_header = NULL, column_delimiter = NULL, escaped = NULL, compression = NULL, existing_table_rows = NULL, max_errors = NULL, table_columns = NULL, loosen_types = NULL, execution = NULL, redshift_destination_options = NULL) { +jobs_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv/{id}" + path <- "/jobs/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, destination = destination, firstRowIsHeader = first_row_is_header, columnDelimiter = column_delimiter, escaped = escaped, compression = compression, existingTableRows = existing_table_rows, maxErrors = max_errors, tableColumns = table_columns, loosenTypes = loosen_types, execution = execution, redshiftDestinationOptions = redshift_destination_options) + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Archive a CSV Import (deprecated, use archiving endpoints instead) -#' @param id integer required. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. #' #' @return An empty HTTP response #' @export -imports_delete_files_csv <- function(id) { +jobs_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv/{id}" - path_params <- list(id = id) + path <- "/jobs/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -9209,60 +11105,39 @@ imports_delete_files_csv <- function(id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{name}{string, The name of the import.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item fileIds array, The file ID(s) to import, if importing Civis file(s). -#' \item storagePath list . A list containing the following elements: -#' \itemize{ -#' \item storageHostId integer, The ID of the source storage host. 
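# --- Editorial aside (not part of the generated client) ----------------------
# A minimal usage sketch for the job-sharing bindings introduced in this hunk:
# list who can see a job, grant read access to two users, then revoke one of
# them. The job ID (123) and user IDs (456, 789) are hypothetical, and a valid
# CIVIS_API_KEY is assumed to be configured for the session.
library(civis)
jobs_list_shares(id = 123)                               # current readers/writers/owners
jobs_put_shares_users(id = 123, user_ids = c(456, 789),  # grant read access
                      permission_level = "read",
                      send_shared_email = FALSE)
jobs_delete_shares_users(id = 123, user_id = 789)        # revoke one user
# ------------------------------------------------------------------------------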
-#' \item credentialId integer, The ID of the credentials for the source storage host. -#' \item filePaths array, The file or directory path(s) within the bucket from which to import. E.g. the file_path for "s3://mybucket/files/all/" would be "/files/all/"If specifying a directory path, the job will import every file found under that path. All files must have the same column layout and file format (e.g., compression, columnDelimiter, etc.). -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The destination schema name. -#' \item table string, The destination table name. -#' \item remoteHostId integer, The ID of the destination database host. -#' \item credentialId integer, The ID of the credentials for the destination database. -#' \item primaryKeys array, A list of column(s) which together uniquely identify a row in the destination table.These columns must not contain NULL values. If the import mode is "upsert", this field is required;see the Civis Helpdesk article on "Advanced CSV Imports via the Civis API" for more information. -#' \item lastModifiedKeys array, A list of the columns indicating a record has been updated.If the destination table does not exist, and the import mode is "upsert", this field is required. +#' \item users array, +#' \item groups array, #' }} -#' \item{firstRowIsHeader}{boolean, A boolean value indicating whether or not the first row of the source file is a header row.} -#' \item{columnDelimiter}{string, The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". Defaults to "comma".} -#' \item{escaped}{boolean, A boolean value indicating whether or not the source file has quotes escaped with a backslash.Defaults to false.} -#' \item{compression}{string, The type of compression of the source file. Valid arguments are "gzip" and "none". Defaults to "none".} -#' \item{existingTableRows}{string, The behavior if a destination table with the requested name already exists. One of "fail", "truncate", "append", "drop", or "upsert".Defaults to "fail".} -#' \item{maxErrors}{integer, The maximum number of rows with errors to ignore before failing. This option is not supported for Postgres databases.} -#' \item{tableColumns}{array, An array containing the following fields: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item name string, The column name. -#' \item sqlType string, The SQL type of the column. +#' \item users array, +#' \item groups array, #' }} -#' \item{loosenTypes}{boolean, If true, SQL types with precisions/lengths will have these values increased to accommodate data growth in future loads. Type loosening only occurs on table creation. Defaults to false.} -#' \item{execution}{string, In upsert mode, controls the movement of data in upsert mode. If set to "delayed", the data will be moved after a brief delay. If set to "immediate", the data will be moved immediately. In non-upsert modes, controls the speed at which detailed column stats appear in the data catalogue. Defaults to "delayed", to accommodate concurrent upserts to the same table and speedier non-upsert imports.} -#' \item{redshiftDestinationOptions}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item diststyle string, The diststyle to use for the table. One of "even", "all", or "key". 
-#' \item distkey string, Distkey for this table in Redshift -#' \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -imports_put_files_csv_archive <- function(id, status) { +jobs_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/files/csv/{id}/archive" + path <- "/jobs/{id}/shares/groups" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -9273,175 +11148,100 @@ imports_put_files_csv_archive <- function(id, status) { } -#' List batch imports -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for the import.} -#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} -#' \item{table}{string, The destination table name, without the schema prefix. 
This table must already exist in Redshift.} -#' \item{remoteHostId}{integer, The ID of the destination database host.} -#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error returned by the run, if any.} +#' @return An empty HTTP response #' @export -imports_list_batches <- function(hidden = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +jobs_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/imports/batches" - path_params <- list() - query_params <- list(hidden = hidden, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/jobs/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Upload multiple files to Civis -#' @param file_ids array required. The file IDs for the import. -#' @param schema string required. The destination schema name. This schema must already exist in Redshift. -#' @param table string required. The destination table name, without the schema prefix. This table must already exist in Redshift. -#' @param remote_host_id integer required. The ID of the destination database host. -#' @param credential_id integer required. The ID of the credentials to be used when performing the database import. -#' @param column_delimiter string optional. The column delimiter for the file. Valid arguments are "comma", "tab", and "pipe". If unspecified, defaults to "comma". -#' @param first_row_is_header boolean optional. A boolean value indicating whether or not the first row is a header row. If unspecified, defaults to false. -#' @param compression string optional. The type of compression. Valid arguments are "gzip", "zip", and "none". If unspecified, defaults to "gzip". -#' @param hidden boolean optional. The hidden status of the item. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} -#' \item{table}{string, The destination table name, without the schema prefix. 
This table must already exist in Redshift.} -#' \item{remoteHostId}{integer, The ID of the destination database host.} -#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error returned by the run, if any.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -imports_post_batches <- function(file_ids, schema, table, remote_host_id, credential_id, column_delimiter = NULL, first_row_is_header = NULL, compression = NULL, hidden = NULL) { +jobs_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/batches" - path_params <- list() - query_params <- list() - body_params <- list(fileIds = file_ids, schema = schema, table = table, remoteHostId = remote_host_id, credentialId = credential_id, columnDelimiter = column_delimiter, firstRowIsHeader = first_row_is_header, compression = compression, hidden = hidden) + path <- "/jobs/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Get details about a batch import -#' @param id integer required. The ID for the import. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the import.} -#' \item{schema}{string, The destination schema name. This schema must already exist in Redshift.} -#' \item{table}{string, The destination table name, without the schema prefix. 
This table must already exist in Redshift.} -#' \item{remoteHostId}{integer, The ID of the destination database host.} -#' \item{state}{string, The state of the run; one of "queued", "running", "succeeded", "failed", or "cancelled".} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error returned by the run, if any.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -imports_get_batches <- function(id) { +jobs_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/batches/{id}" + path <- "/jobs/{id}/transfer" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get details about an import -#' @param id integer required. The ID for the import. +#' List the projects a Job belongs to +#' @param id integer required. The ID of the Job. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' -#' @return A list containing the following elements: -#' \item{name}{string, The name of the import.} -#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. 
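# --- Editorial aside (not part of the generated client) ----------------------
# A sketch of a possible ownership hand-off using the bindings above: inspect
# the dependent objects visible to a target user, then transfer the job along
# with manage rights on those dependencies. The job ID (123) and user ID (456)
# are hypothetical, and a valid CIVIS_API_KEY is assumed.
library(civis)
deps <- jobs_list_dependencies(id = 123, user_id = 456)  # what the target user will inherit
jobs_put_transfer(id = 123, user_id = 456,
                  include_dependencies = TRUE, send_email = FALSE)
# ------------------------------------------------------------------------------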
-#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{parentId}{integer, Parent id to trigger this import from} -#' \item{id}{integer, The ID for the import.} -#' \item{isOutbound}{boolean, } -#' \item{jobType}{string, The job type of this import.} -#' \item{syncs}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, -#' \item source object, -#' \item destination object, -#' \item advancedOptions object, -#' }} -#' \item{state}{string, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{user}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -9449,7 +11249,9 @@ imports_get_batches <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -9457,122 +11259,95 @@ imports_get_batches <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. 
#' }} -#' \item{nextRunAt}{string, The time of the next scheduled run.} -#' \item{timeZone}{string, The time zone of this import.} -#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -imports_get <- function(id) { +jobs_list_projects <- function(id, hidden = NULL) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/projects" + path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add a Job to a project +#' @param id integer required. The ID of the Job. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +jobs_put_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/jobs/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Remove a Job from a project +#' @param id integer required. The ID of the Job. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +jobs_delete_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}" - path_params <- list(id = id) + path <- "/jobs/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Update an import -#' @param id integer required. The ID for the import. -#' @param name string required. The name of the import. -#' @param sync_type string required. The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce. -#' @param is_outbound boolean required. -#' @param source list optional. A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' } -#' @param destination list optional. A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. 
For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' } -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param parent_id integer optional. Parent id to trigger this import from -#' @param next_run_at string optional. The time of the next scheduled run. -#' @param time_zone string optional. The time zone of this import. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{name}{string, The name of the import.} -#' \item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, GdocImport, GdocExport, and Salesforce.} -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item remoteHostId integer, -#' \item credentialId integer, -#' \item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id. For DB Syncs, the first element is an SSL private key credential id, and the second element is the corresponding public key credential id. -#' \item name string, -#' }} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
-#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{parentId}{integer, Parent id to trigger this import from} -#' \item{id}{integer, The ID for the import.} -#' \item{isOutbound}{boolean, } -#' \item{jobType}{string, The job type of this import.} -#' \item{syncs}{array, An array containing the following fields: +#' \item{id}{integer, } +#' \item{name}{string, } +#' \item{type}{string, } +#' \item{fromTemplateId}{integer, } +#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{runs}{array, An array containing the following fields: #' \itemize{ #' \item id integer, -#' \item source object, -#' \item destination object, -#' \item advancedOptions object, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{state}{string, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } #' \item{lastRun}{list, A list containing the following elements: #' \itemize{ #' \item id integer, @@ -9582,7 +11357,9 @@ imports_get <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} -#' \item{user}{list, A list containing the following elements: +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -9590,26 +11367,28 @@ imports_get <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{runningAs}{list, A list containing the following elements: +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +#' \item{successEmailSubject}{string, } +#' \item{successEmailBody}{string, } +#' \item{runningAsUser}{string, } +#' \item{runByUser}{string, } +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{nextRunAt}{string, The time of the next scheduled run.} -#' \item{timeZone}{string, The time zone of this import.} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -imports_put <- function(id, name, sync_type, is_outbound, source = NULL, destination = NULL, schedule = NULL, notifications = NULL, parent_id = NULL, next_run_at = NULL, time_zone = NULL) { +jobs_put_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/imports/{id}" + path <- "/jobs/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, syncType = sync_type, isOutbound = is_outbound, source = source, destination = destination, schedule = schedule, notifications = notifications, parentId = parent_id, nextRunAt = next_run_at, timeZone = time_zone) + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -9620,411 +11399,222 @@ imports_put <- function(id, name, sync_type, is_outbound, source = NULL, destina } -#' Get the run history of this import -#' @param id integer required. +#' Create a JSON Value +#' @param value_str string required. The JSON value to store. Should be a serialized JSON string. Limited to 1000000 bytes. +#' @param name string optional. The name of the JSON Value. 
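# --- Editorial aside (not part of the generated client) ----------------------
# A brief sketch tying together the project and archive endpoints added above:
# file a job under a project, then archive it once it is no longer needed.
# The job ID (123) and project ID (42) are hypothetical; a valid CIVIS_API_KEY
# is assumed.
library(civis)
jobs_put_projects(id = 123, project_id = 42)
jobs_put_archive(id = 123, status = TRUE)   # set status = FALSE to un-archive
# ------------------------------------------------------------------------------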
#' -#' @return An array containing the following fields: -#' \item{id}{integer, } -#' \item{state}{string, } -#' \item{createdAt}{string, The time that the run was queued.} -#' \item{startedAt}{string, The time that the run started.} -#' \item{finishedAt}{string, The time that the run completed.} -#' \item{error}{string, The error message for this run, if present.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the JSON Value.} +#' \item{name}{string, The name of the JSON Value.} +#' \item{value}{string, The deserialized JSON value.} #' @export -imports_list_runs <- function(id) { +json_values_post <- function(value_str, name = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/runs" - path_params <- list(id = id) + path <- "/json_values/" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(valueStr = value_str, name = name) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Run an import -#' @param id integer required. The ID of the import to run. +#' Get details about a JSON Value +#' @param id integer required. The ID of the JSON Value. #' #' @return A list containing the following elements: -#' \item{runId}{integer, The ID of the new run triggered.} +#' \item{id}{integer, The ID of the JSON Value.} +#' \item{name}{string, The name of the JSON Value.} +#' \item{value}{string, The deserialized JSON value.} #' @export -imports_post_runs <- function(id) { +json_values_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/runs" + path <- "/json_values/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Cancel a run -#' @param id integer required. The ID of the job. +#' Update some attributes of this JSON Value +#' @param id integer required. The ID of the JSON Value. +#' @param name string optional. The name of the JSON Value. +#' @param value_str string optional. The JSON value to store. Should be a serialized JSON string. Limited to 1000000 bytes. 
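# --- Editorial aside (not part of the generated client) ----------------------
# A minimal round trip with the new JSON Value endpoints: store a small JSON
# payload, read it back, and update it in place with json_values_patch()
# (defined just below). The payload and name are illustrative; a valid
# CIVIS_API_KEY is assumed.
library(civis)
val <- json_values_post(value_str = '{"threshold": 0.5}', name = "model-config")
json_values_get(val$id)                                   # returns id, name, value
json_values_patch(val$id, value_str = '{"threshold": 0.7}')
# ------------------------------------------------------------------------------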
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{state}{string, The state of the run, one of 'queued', 'running' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{id}{integer, The ID of the JSON Value.} +#' \item{name}{string, The name of the JSON Value.} +#' \item{value}{string, The deserialized JSON value.} #' @export -imports_post_cancel <- function(id) { +json_values_patch <- function(id, name = NULL, value_str = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/cancel" + path <- "/json_values/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, valueStr = value_str) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Create a sync -#' @param id integer required. -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item file list . -#' \item googleWorksheet list . A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' \item salesforce list . A list containing the following elements: -#' \itemize{ -#' \item objectName string, The Salesforce object name. -#' } -#' } -#' @param destination list required. A list containing the following elements: -#' \itemize{ -#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet -#' \item databaseTable list . A list containing the following elements: +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. +#' +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item googleWorksheet list . 
A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' } -#' @param advanced_options list optional. A list containing the following elements: +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item maxErrors integer, -#' \item existingTableRows string, -#' \item diststyle string, -#' \item distkey string, -#' \item sortkey1 string, -#' \item sortkey2 string, -#' \item columnDelimiter string, -#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. -#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. -#' \item identityColumn string, -#' \item rowChunkSize integer, -#' \item wipeDestinationTable boolean, -#' \item truncateLongLines boolean, -#' \item invalidCharReplacement string, -#' \item verifyTableRowCounts boolean, -#' \item partitionColumnName string, This parameter is deprecated -#' \item partitionSchemaName string, This parameter is deprecated -#' \item partitionTableName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated -#' \item lastModifiedColumn string, -#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -#' \item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. -#' \item firstRowIsHeader boolean, -#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" -#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. -#' \item contactLists string, -#' \item soqlQuery string, -#' \item includeDeletedRecords boolean, -#' } +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' @export +json_values_list_shares <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/json_values/{id}/shares" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of the table or file, if available. -#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item file list . A list containing the following elements: -#' \itemize{ -#' \item id integer, The file id. -#' } -#' \item googleWorksheet list . A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' \item salesforce list . A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item objectName string, The Salesforce object name. -#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item googleWorksheet list . A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. 
-#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{advancedOptions}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item maxErrors integer, -#' \item existingTableRows string, -#' \item diststyle string, -#' \item distkey string, -#' \item sortkey1 string, -#' \item sortkey2 string, -#' \item columnDelimiter string, -#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. -#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. -#' \item identityColumn string, -#' \item rowChunkSize integer, -#' \item wipeDestinationTable boolean, -#' \item truncateLongLines boolean, -#' \item invalidCharReplacement string, -#' \item verifyTableRowCounts boolean, -#' \item partitionColumnName string, This parameter is deprecated -#' \item partitionSchemaName string, This parameter is deprecated -#' \item partitionTableName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated -#' \item lastModifiedColumn string, -#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -#' \item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. -#' \item firstRowIsHeader boolean, -#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" -#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. -#' \item contactLists string, -#' \item soqlQuery string, -#' \item includeDeletedRecords boolean, +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -imports_post_syncs <- function(id, source, destination, advanced_options = NULL) { +json_values_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/syncs" + path <- "/json_values/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list(source = source, destination = destination, advancedOptions = advanced_options) + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Update a sync -#' @param id integer required. The ID of the import to fetch. -#' @param sync_id integer required. The ID of the sync to fetch. -#' @param source list required. A list containing the following elements: -#' \itemize{ -#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item file list . -#' \item googleWorksheet list . A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' \item salesforce list . A list containing the following elements: -#' \itemize{ -#' \item objectName string, The Salesforce object name. -#' } -#' } -#' @param destination list required. A list containing the following elements: -#' \itemize{ -#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item googleWorksheet list . A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' } -#' @param advanced_options list optional. 
A list containing the following elements: -#' \itemize{ -#' \item maxErrors integer, -#' \item existingTableRows string, -#' \item diststyle string, -#' \item distkey string, -#' \item sortkey1 string, -#' \item sortkey2 string, -#' \item columnDelimiter string, -#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. -#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. -#' \item identityColumn string, -#' \item rowChunkSize integer, -#' \item wipeDestinationTable boolean, -#' \item truncateLongLines boolean, -#' \item invalidCharReplacement string, -#' \item verifyTableRowCounts boolean, -#' \item partitionColumnName string, This parameter is deprecated -#' \item partitionSchemaName string, This parameter is deprecated -#' \item partitionTableName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated -#' \item lastModifiedColumn string, -#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -#' \item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. -#' \item firstRowIsHeader boolean, -#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" -#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. -#' \item contactLists string, -#' \item soqlQuery string, -#' \item includeDeletedRecords boolean, -#' } +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. +#' +#' @return An empty HTTP response +#' @export +json_values_delete_shares_users <- function(id, user_id) { + + args <- as.list(match.call())[-1] + path <- "/json_values/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. 
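
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# A minimal example of sharing a JSON value with a user and then revoking that
# share, using the endpoints added above. The IDs are hypothetical and a valid
# CIVIS_API_KEY is assumed to be configured in the environment.
library(civis)
value_id <- 1234L   # hypothetical JSON value ID
analyst  <- 5678L   # hypothetical user ID

# Grant read access; the response describes the full share state
# (readers, writers, owners, totals).
shares <- json_values_put_shares_users(
  id = value_id,
  user_ids = list(analyst),       # array of one or more user IDs
  permission_level = "read",
  send_shared_email = FALSE
)

# Revoke the same user's access; returns an empty HTTP response.
json_values_delete_shares_users(id = value_id, user_id = analyst)
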
#' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of the table or file, if available. -#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item file list . A list containing the following elements: -#' \itemize{ -#' \item id integer, The file id. -#' } -#' \item googleWorksheet list . A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' \item salesforce list . A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item objectName string, The Salesforce object name. -#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item googleWorksheet list . A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } +#' \item users array, +#' \item groups array, #' }} -#' \item{advancedOptions}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item maxErrors integer, -#' \item existingTableRows string, -#' \item diststyle string, -#' \item distkey string, -#' \item sortkey1 string, -#' \item sortkey2 string, -#' \item columnDelimiter string, -#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. -#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. 
-#' \item identityColumn string, -#' \item rowChunkSize integer, -#' \item wipeDestinationTable boolean, -#' \item truncateLongLines boolean, -#' \item invalidCharReplacement string, -#' \item verifyTableRowCounts boolean, -#' \item partitionColumnName string, This parameter is deprecated -#' \item partitionSchemaName string, This parameter is deprecated -#' \item partitionTableName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated -#' \item lastModifiedColumn string, -#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -#' \item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. -#' \item firstRowIsHeader boolean, -#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" -#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. -#' \item contactLists string, -#' \item soqlQuery string, -#' \item includeDeletedRecords boolean, +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -imports_put_syncs <- function(id, sync_id, source, destination, advanced_options = NULL) { +json_values_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/syncs/{sync_id}" - path_params <- list(id = id, sync_id = sync_id) + path <- "/json_values/{id}/shares/groups" + path_params <- list(id = id) query_params <- list() - body_params <- list(source = source, destination = destination, advancedOptions = advanced_options) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -10035,17 +11625,17 @@ imports_put_syncs <- function(id, sync_id, source, destination, advanced_options } -#' Archive a sync (deprecated, use the /archive endpoint instead) -#' @param id integer required. The ID of the import to fetch. -#' @param sync_id integer required. The ID of the sync to fetch. +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. 
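
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# Hypothetical example: grant a group write access to a JSON value via the
# group-sharing endpoint added above. The IDs and email text are made up; a
# configured CIVIS_API_KEY is assumed.
json_values_put_shares_groups(
  id = 1234L,                  # hypothetical JSON value ID
  group_ids = list(42L),       # array of one or more group IDs
  permission_level = "write",
  share_email_body = "Sharing this JSON value with the analytics team.",
  send_shared_email = TRUE
)
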
#' #' @return An empty HTTP response #' @export -imports_delete_syncs <- function(id, sync_id) { +json_values_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/syncs/{sync_id}" - path_params <- list(id = id, sync_id = sync_id) + path <- "/json_values/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -10058,96 +11648,60 @@ imports_delete_syncs <- function(id, sync_id) { } -#' Update the archive status of this sync -#' @param id integer required. The ID of the import to fetch. -#' @param sync_id integer required. The ID of the sync to fetch. -#' @param status boolean optional. The desired archived status of the sync. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +json_values_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/json_values/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{source}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of the table or file, if available. -#' \item path string, The path of the dataset to sync from; for a database source, schema.tablename. If you are doing a Google Sheet export, this can be blank. This is a legacy parameter, it is recommended you use one of the following: databaseTable, file, googleWorksheet, salesforce -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item file list . A list containing the following elements: -#' \itemize{ -#' \item id integer, The file id. -#' } -#' \item googleWorksheet list . 
A list containing the following elements: -#' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' \item salesforce list . A list containing the following elements: -#' \itemize{ -#' \item objectName string, The Salesforce object name. -#' } -#' }} -#' \item{destination}{list, A list containing the following elements: -#' \itemize{ -#' \item path string, The schema.tablename to sync to. If you are doing a Google Sheet export, this is the spreadsheet and sheet name separated by a period. i.e. if you have a spreadsheet named "MySpreadsheet" and a sheet called "Sheet1" this field would be "MySpreadsheet.Sheet1". This is a legacy parameter, it is recommended you use one of the following: databaseTable, googleWorksheet -#' \item databaseTable list . A list containing the following elements: -#' \itemize{ -#' \item schema string, The database schema name. -#' \item table string, The database table name. -#' \item useWithoutSchema boolean, This attribute is no longer available; defaults to false but cannot be used. -#' } -#' \item googleWorksheet list . A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: #' \itemize{ -#' \item spreadsheet string, The spreadsheet document name. -#' \item spreadsheetId string, The spreadsheet document id. -#' \item worksheet string, The worksheet tab name. -#' \item worksheetId integer, The worksheet tab id. -#' } -#' }} -#' \item{advancedOptions}{list, A list containing the following elements: -#' \itemize{ -#' \item maxErrors integer, -#' \item existingTableRows string, -#' \item diststyle string, -#' \item distkey string, -#' \item sortkey1 string, -#' \item sortkey2 string, -#' \item columnDelimiter string, -#' \item columnOverrides object, Hash used for overriding auto-detected names and types, with keys being the index of the column being overridden. -#' \item escaped boolean, If true, escape quotes with a backslash; otherwise, escape quotes by double-quoting. Defaults to false. -#' \item identityColumn string, -#' \item rowChunkSize integer, -#' \item wipeDestinationTable boolean, -#' \item truncateLongLines boolean, -#' \item invalidCharReplacement string, -#' \item verifyTableRowCounts boolean, -#' \item partitionColumnName string, This parameter is deprecated -#' \item partitionSchemaName string, This parameter is deprecated -#' \item partitionTableName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMinName string, This parameter is deprecated -#' \item partitionTablePartitionColumnMaxName string, This parameter is deprecated -#' \item lastModifiedColumn string, -#' \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -#' \item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. -#' \item firstRowIsHeader boolean, -#' \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. 
Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" -#' \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. -#' \item contactLists string, -#' \item soqlQuery string, -#' \item includeDeletedRecords boolean, +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user #' }} #' @export -imports_put_syncs_archive <- function(id, sync_id, status = NULL) { +json_values_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/imports/{id}/syncs/{sync_id}/archive" - path_params <- list(id = id, sync_id = sync_id) + path <- "/json_values/{id}/transfer" + path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -10158,52 +11712,34 @@ imports_put_syncs_archive <- function(id, sync_id, status = NULL) { } -#' List Jobs -#' @param state string optional. The job's state. One or more of queued, running, succeeded, failed, and cancelled. Specify multiple values as a comma-separated list (e.g., "A,B"). -#' @param type string optional. The job's type. Specify multiple values as a comma-separated list (e.g., "A,B"). -#' @param q string optional. Query string to search on the id, name, and job type. -#' @param permission string optional. A permissions string, one of "read", "write", or "manage". Lists only jobs for which the current user has that permission. -#' @param scheduled boolean optional. If the item is scheduled. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. 
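
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# Hypothetical example: inspect a JSON value's dependent objects from the
# target user's perspective, then transfer ownership along with manage rights
# on those dependencies. IDs are made up; a configured CIVIS_API_KEY is assumed.
value_id  <- 1234L   # hypothetical JSON value ID
new_owner <- 5678L   # hypothetical user ID

# Returns one entry per dependent object, including its permission level
# for the target user and whether it can be shared.
deps <- json_values_list_dependencies(id = value_id, user_id = new_owner)

json_values_put_transfer(
  id = value_id,
  user_id = new_owner,
  include_dependencies = TRUE,
  send_email = FALSE
)
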
#' #' @return An array containing the following fields: -#' \item{id}{integer, } -#' \item{name}{string, } -#' \item{type}{string, } -#' \item{fromTemplateId}{integer, } -#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{lastRun}{list, A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{schedule}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_list <- function(state = NULL, type = NULL, q = NULL, permission = NULL, scheduled = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +match_targets_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/jobs/" - path_params <- list() - query_params <- list(state = state, type = type, q = q, permission = permission, scheduled = scheduled, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/match_targets/{id}/shares" + path_params <- list(id = id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -10215,140 +11751,183 @@ jobs_list <- function(state = NULL, type = NULL, q = NULL, permission = NULL, sc } -#' Show basic job info -#' @param id integer required. The ID for this job. +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{name}{string, } -#' \item{type}{string, } -#' \item{fromTemplateId}{integer, } -#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{runs}{array, An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{successEmailSubject}{string, } -#' \item{successEmailBody}{string, } -#' \item{runningAsUser}{string, } -#' \item{runByUser}{string, } -#' \item{schedule}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_get <- function(id) { +match_targets_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}" + path <- "/match_targets/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Generate and retrieve trigger email address -#' @param id integer required. The ID for this job. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. 
+#' @param user_id integer required. The ID of the user. #' -#' @return A list containing the following elements: -#' \item{triggerEmail}{string, Email address which may be used to trigger this job to run.} +#' @return An empty HTTP response #' @export -jobs_post_trigger_email <- function(id) { +match_targets_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/trigger_email" - path_params <- list(id = id) + path <- "/match_targets/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Show chain of parents as a list that this job triggers from -#' @param id integer required. The ID for this job. +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{name}{string, } -#' \item{type}{string, } -#' \item{fromTemplateId}{integer, } -#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{runs}{array, An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{successEmailSubject}{string, } -#' \item{successEmailBody}{string, } -#' \item{runningAsUser}{string, } -#' \item{runByUser}{string, } -#' \item{schedule}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_list_parents <- function(id) { +match_targets_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/parents" + path <- "/match_targets/{id}/shares/groups" + path_params <- list(id = id) + query_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. +#' +#' @return An empty HTTP response +#' @export +match_targets_delete_shares_groups <- function(id, group_id) { + + args <- as.list(match.call())[-1] + path <- "/match_targets/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. 
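
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# Hypothetical example: review who can see a match target, grant a group read
# access, and later revoke it, using the share endpoints added above. IDs are
# made up; a configured CIVIS_API_KEY is assumed.
target_id <- 987L   # hypothetical match target ID

# Current readers, writers, and owners of the match target.
match_targets_list_shares(id = target_id)

match_targets_put_shares_groups(
  id = target_id,
  group_ids = list(42L),
  permission_level = "read",
  send_shared_email = FALSE
)

# Remove the group's access again; returns an empty HTTP response.
match_targets_delete_shares_groups(id = target_id, group_id = 42L)
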
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the match target} +#' \item{name}{string, The name of the match target} +#' \item{targetFileName}{string, The name of the target file} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{boolean, Whether the match target has been archived.} +#' @export +match_targets_put_archive <- function(id, status) { + + args <- as.list(match.call())[-1] + path <- "/match_targets/{id}/archive" path_params <- list(id = id) query_params <- list() + body_params <- list(status = status) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List match targets +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the match target} +#' \item{name}{string, The name of the match target} +#' \item{targetFileName}{string, The name of the target file} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{boolean, Whether the match target has been archived.} +#' @export +match_targets_list <- function() { + + args <- as.list(match.call())[-1] + path <- "/match_targets/" + path_params <- list() + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -10360,41 +11939,51 @@ jobs_list_parents <- function(id) { } -#' Show nested tree of children that this job triggers -#' @param id integer required. The ID for this job. +#' Create a new match target +#' @param name string required. The name of the match target +#' @param target_file_name string optional. The name of the target file +#' @param archived boolean optional. Whether the match target has been archived. #' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{name}{string, } -#' \item{type}{string, } -#' \item{fromTemplateId}{integer, } -#' \item{state}{string, } +#' \item{id}{integer, The ID of the match target} +#' \item{name}{string, The name of the match target} +#' \item{targetFileName}{string, The name of the target file} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{runs}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. 
-#' }} -#' \item{children}{array, } +#' \item{archived}{boolean, Whether the match target has been archived.} +#' @export +match_targets_post <- function(name, target_file_name = NULL, archived = NULL) { + + args <- as.list(match.call())[-1] + path <- "/match_targets/" + path_params <- list() + query_params <- list() + body_params <- list(name = name, targetFileName = target_file_name, archived = archived) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Show Match Target info +#' @param id integer required. The ID of the match target +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the match target} +#' \item{name}{string, The name of the match target} +#' \item{targetFileName}{string, The name of the target file} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{boolean, Whether the match target has been archived.} #' @export -jobs_list_children <- function(id) { +match_targets_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/children" + path <- "/match_targets/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -10408,106 +11997,130 @@ jobs_list_children <- function(id) { } -#' List runs for the given job -#' @param id integer required. The ID for this job. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Update a match target +#' @param id integer required. The ID of the match target +#' @param name string optional. The name of the match target +#' @param target_file_name string optional. The name of the target file +#' @param archived boolean optional. Whether the match target has been archived. 
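
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# Hypothetical example of the basic match-target lifecycle using the endpoints
# added above: create a target, list existing targets, and archive the new one.
# The name and file name are made up; a configured CIVIS_API_KEY is assumed.
new_target <- match_targets_post(
  name = "2023 voter file match",        # hypothetical match target name
  target_file_name = "voter_file.csv"    # hypothetical target file name
)

# All match targets visible to the current user.
all_targets <- match_targets_list()

# Archive the target just created; the response echoes its metadata.
match_targets_put_archive(id = new_target$id, status = TRUE)
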
#' -#' @return An array containing the following fields: -#' \item{id}{integer, } -#' \item{state}{string, } -#' \item{createdAt}{string, The time that the run was queued.} -#' \item{startedAt}{string, The time that the run started.} -#' \item{finishedAt}{string, The time that the run completed.} -#' \item{error}{string, The error message for this run, if present.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the match target} +#' \item{name}{string, The name of the match target} +#' \item{targetFileName}{string, The name of the target file} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{boolean, Whether the match target has been archived.} #' @export -jobs_list_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +match_targets_patch <- function(id, name = NULL, target_file_name = NULL, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs" + path <- "/match_targets/{id}" path_params <- list(id = id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() + query_params <- list() + body_params <- list(name = name, targetFileName = target_file_name, archived = archived) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Run a job -#' @param id integer required. The ID for this job. +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. #' -#' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{state}{string, } -#' \item{createdAt}{string, The time that the run was queued.} -#' \item{startedAt}{string, The time that the run started.} -#' \item{finishedAt}{string, The time that the run completed.} -#' \item{error}{string, The error message for this run, if present.} +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -jobs_post_runs <- function(id) { +media_list_spot_orders_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs" + path <- "/media/spot_orders/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Check status of a job -#' @param id integer required. The ID of the Job. -#' @param run_id integer required. The ID of the Run. +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{state}{string, } -#' \item{createdAt}{string, The time that the run was queued.} -#' \item{startedAt}{string, The time that the run started.} -#' \item{finishedAt}{string, The time that the run completed.} -#' \item{error}{string, The error message for this run, if present.} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_get_runs <- function(id, run_id) { +media_put_spot_orders_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/media/spot_orders/{id}/shares/users" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Cancel a run -#' @param id integer required. The ID of the Job. -#' @param run_id integer required. The ID of the Run. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. 
The ID of the user. #' #' @return An empty HTTP response #' @export -jobs_delete_runs <- function(id, run_id) { +media_delete_spot_orders_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/media/spot_orders/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -10520,112 +12133,95 @@ jobs_delete_runs <- function(id, run_id) { } -#' List the outputs for a run -#' @param id integer required. The ID of the job. -#' @param run_id integer required. The ID of the run. -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' -#' @return An array containing the following fields: -#' \item{objectType}{string, The type of the output. Valid values are File, Table, Report, Project, Credential, or JSONValue} -#' \item{objectId}{integer, The ID of the output.} -#' \item{name}{string, The name of the output.} -#' \item{link}{string, The hypermedia link to the output.} -#' \item{value}{string, } +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -jobs_list_runs_outputs <- function(id, run_id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +media_put_spot_orders_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs/{run_id}/outputs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() + path <- "/media/spot_orders/{id}/shares/groups" + path_params <- list(id = id) + query_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Get the logs for a run -#' @param id integer required. The ID of the job. -#' @param run_id integer required. The ID of the run. -#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} -#' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} +#' @return An empty HTTP response #' @export -jobs_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { +media_delete_spot_orders_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/runs/{run_id}/logs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(last_id = last_id, limit = limit) + path <- "/media/spot_orders/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' List the workflows a job belongs to -#' @param id integer required. -#' @param archived string optional. The archival status of the requested item(s). +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. 
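
# --- Illustrative usage (editor's sketch, not part of the generated patch) ---
# Hypothetical example: give a group "manage" permission on a media spot order
# and then check the resulting share state, using the endpoints added above.
# IDs are made up; a configured CIVIS_API_KEY is assumed.
order_id <- 321L   # hypothetical spot order ID

media_put_spot_orders_shares_groups(
  id = order_id,
  group_ids = list(42L),
  permission_level = "manage",
  send_shared_email = FALSE
)

# Confirm the group now appears among the order's owners/writers/readers.
media_list_spot_orders_shares(id = order_id)
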
#' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this workflow.} -#' \item{name}{string, The name of this workflow.} -#' \item{description}{string, A description of the workflow.} -#' \item{valid}{boolean, The validity of the workflow definition.} -#' \item{fileId}{string, The file id for the s3 file containing the workflow configuration.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The state of the workflow. State is "running" if any execution is running, otherwise reflects most recent execution state.} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} -#' \item{timeZone}{string, The time zone of this workflow.} -#' \item{nextExecutionAt}{string, The time of the next scheduled execution.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the spot order.} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } +#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} +#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} +#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} +#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} #' @export -jobs_list_workflows <- function(id, archived = NULL) { +media_put_spot_orders_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/workflows" + path <- "/media/spot_orders/{id}/archive" path_params <- list(id = id) - query_params <- list(archived = archived) - body_params <- list() + query_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) @@ -10654,10 +12250,10 @@ jobs_list_workflows <- function(id, archived = NULL) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -jobs_list_shares <- function(id) { +media_list_optimizations_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/shares" + path <- "/media/optimizations/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -10697,10 +12293,10 @@ jobs_list_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_put_optimizations_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/shares/users" + path <- "/media/optimizations/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -10720,10 +12316,10 @@ jobs_put_shares_users <- function(id, user_ids, permission_level, share_email_bo #' #' @return An empty HTTP response #' @export -jobs_delete_shares_users <- function(id, user_id) { +media_delete_optimizations_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/shares/users/{user_id}" + path <- "/media/optimizations/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -10763,10 +12359,10 @@ jobs_delete_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_put_optimizations_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/shares/groups" + path <- "/media/optimizations/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -10786,10 +12382,10 @@ jobs_put_shares_groups <- function(id, group_ids, permission_level, share_email_ #' #' @return An empty HTTP response #' @export -jobs_delete_shares_groups <- function(id, group_id) { +media_delete_optimizations_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/shares/groups/{group_id}" + path <- "/media/optimizations/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -10803,12 +12399,12 @@ jobs_delete_shares_groups <- function(id, group_id) { } -#' List the projects a Job belongs to -#' @param id integer required. The ID of the Job. -#' @param hidden boolean optional. If specified to be true, returns hidden items. 
Defaults to false, returning non-hidden items. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} +#' @return A list containing the following elements: +#' \item{id}{integer, The optimization ID.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -10817,27 +12413,79 @@ jobs_delete_shares_groups <- function(id, group_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{autoShare}{boolean, } +#' \item{name}{string, The name of the optimization.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{reportLink}{string, A link to the visual report for the optimization.} +#' \item{spotOrderLink}{string, A link to the json version of the spot order.} +#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} +#' \item{runs}{array, An array containing the following fields: +#' \itemize{ +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. 
+#' }} +#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} +#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} +#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} +#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} +#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -jobs_list_projects <- function(id, hidden = NULL) { +media_put_optimizations_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/projects" + path <- "/media/optimizations/{id}/archive" path_params <- list(id = id) - query_params <- list(hidden = hidden) + query_params <- list() + body_params <- list(status = status) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. +#' +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +media_list_ratecards_shares <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/media/ratecards/{id}/shares" + path_params <- list(id = id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -10849,19 +12497,39 @@ jobs_list_projects <- function(id, hidden = NULL) { } -#' Add a Job to a project -#' @param id integer required. The ID of the Job. -#' @param project_id integer required. The ID of the project. +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. 
Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_put_projects <- function(id, project_id) { +media_put_ratecards_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/media/ratecards/{id}/shares/users" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -10872,17 +12540,17 @@ jobs_put_projects <- function(id, project_id) { } -#' Remove a Job from a project -#' @param id integer required. The ID of the Job. -#' @param project_id integer required. The ID of the project. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. #' #' @return An empty HTTP response #' @export -jobs_delete_projects <- function(id, project_id) { +media_delete_ratecards_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/media/ratecards/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -10895,58 +12563,39 @@ jobs_delete_projects <- function(id, project_id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. 
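The ratecard share endpoints follow the same pattern; a small sketch with made-up IDs and message text:

library(civis)

# Give two users "write" access to ratecard 42, then remove one of them.
media_put_ratecards_shares_users(
  id = 42,
  user_ids = list(101, 102),
  permission_level = "write",
  share_email_body = "Sharing the Q3 ratecard.",  # example text, optional
  send_shared_email = TRUE
)
media_delete_ratecards_shares_users(id = 42, user_id = 102)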
#' #' @return A list containing the following elements: -#' \item{id}{integer, } -#' \item{name}{string, } -#' \item{type}{string, } -#' \item{fromTemplateId}{integer, } -#' \item{state}{string, Whether the job is idle, queued, running, cancelled, or failed.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{runs}{array, An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{lastRun}{list, A list containing the following elements: +#' \item{writers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item users array, +#' \item groups array, #' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{successEmailSubject}{string, } -#' \item{successEmailBody}{string, } -#' \item{runningAsUser}{string, } -#' \item{runByUser}{string, } -#' \item{schedule}{list, A list containing the following elements: +#' \item{owners}{list, A list containing the following elements: #' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item users array, +#' \item groups array, #' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -jobs_put_archive <- function(id, status) { +media_put_ratecards_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/jobs/{id}/archive" + path <- "/media/ratecards/{id}/shares/groups" path_params <- list(id = id) query_params <- list() - body_params <- list(status = status) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -10957,283 +12606,419 @@ jobs_put_archive <- function(id, status) { } -#' Create a JSON Value -#' @param value_str string required. The JSON value to store. Should be a serialized JSON string. Limited to 1000000 bytes. -#' @param name string optional. The name of the JSON Value. 
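A sketch of inspecting, then changing, group permissions on a ratecard (the ratecard and group IDs are invented):

library(civis)

# See who can currently access ratecard 42 ...
shares <- media_list_ratecards_shares(id = 42)
str(shares)

# ... then give a group "manage" permission on it.
media_put_ratecards_shares_groups(
  id = 42,
  group_ids = list(789),
  permission_level = "manage"
)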
+#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the JSON Value.} -#' \item{name}{string, The name of the JSON Value.} -#' \item{value}{string, The deserialized JSON value.} +#' @return An empty HTTP response #' @export -json_values_post <- function(value_str, name = NULL) { +media_delete_ratecards_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/json_values/" - path_params <- list() + path <- "/media/ratecards/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() - body_params <- list(valueStr = value_str, name = name) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Get details about a JSON Value -#' @param id integer required. The ID of the JSON Value. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the JSON Value.} -#' \item{name}{string, The name of the JSON Value.} -#' \item{value}{string, The deserialized JSON value.} +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -json_values_get <- function(id) { +media_put_ratecards_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}" + path <- "/media/ratecards/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Update some attributes of this JSON Value -#' @param id integer required. The ID of the JSON Value. -#' @param name string optional. The name of the JSON Value. -#' @param value_str string optional. The JSON value to store. Should be a serialized JSON string. Limited to 1000000 bytes. +#' List all optimizations +#' @param archived string optional. The archival status of the requested item(s). +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, author, name. +#' @param order_dir string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the JSON Value.} -#' \item{name}{string, The name of the JSON Value.} -#' \item{value}{string, The deserialized JSON value.} +#' @return An array containing the following fields: +#' \item{id}{integer, The optimization ID.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of the optimization.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -json_values_patch <- function(id, name = NULL, value_str = NULL) { +media_list_optimizations <- function(archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, valueStr = value_str) + path <- "/media/optimizations" + path_params <- list() + query_params <- list(archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. -#' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' Create a new optimization +#' @param runs array required. An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. +#' } +#' @param name string optional. The name of the optimization. +#' @param programs array optional. An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set. +#' @param networks array optional. An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set. 
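A sketch of paging through optimizations with the parameters documented above (the values chosen here are arbitrary):

library(civis)

# Fetch the 50 most recently created optimizations, newest first.
opts <- media_list_optimizations(
  limit = 50,
  page_num = 1,
  order = "created_at",
  order_dir = "desc"
)
length(opts)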
+#' @param exclude_programs boolean optional. If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set. +#' @param exclude_networks boolean optional. If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set. +#' @param time_slot_percentages list optional. The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The optimization ID.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{name}{string, The name of the optimization.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{reportLink}{string, A link to the visual report for the optimization.} +#' \item{spotOrderLink}{string, A link to the json version of the spot order.} +#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} +#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} +#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} +#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} +#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -json_values_list_shares <- function(id) { +media_post_optimizations <- function(runs, name = NULL, programs = NULL, networks = NULL, exclude_programs = NULL, exclude_networks = NULL, time_slot_percentages = NULL) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}/shares" - path_params <- list(id = id) + path <- "/media/optimizations" + path_params <- list() query_params <- list() - body_params <- list() + body_params <- list(runs = runs, name = name, programs = programs, networks = networks, excludePrograms = exclude_programs, excludeNetworks = exclude_networks, timeSlotPercentages = time_slot_percentages) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Show a single optimization +#' @param id integer required. The optimization ID. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{id}{integer, The optimization ID.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{name}{string, The name of the optimization.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{reportLink}{string, A link to the visual report for the optimization.} +#' \item{spotOrderLink}{string, A link to the json version of the spot order.} +#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} +#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} +#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} +#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} +#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -json_values_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_get_optimizations <- function(id) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}/shares/users" + path <- "/media/optimizations/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, 
query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Edit an existing optimization +#' @param id integer required. The optimization ID. +#' @param name string optional. The name of the optimization. +#' @param runs array optional. An array containing the following fields: +#' \itemize{ +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. +#' } +#' @param programs array optional. An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set. +#' @param networks array optional. An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set. +#' @param exclude_programs boolean optional. If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set. +#' @param exclude_networks boolean optional. If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set. +#' @param time_slot_percentages list optional. The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The optimization ID.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of the optimization.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{reportLink}{string, A link to the visual report for the optimization.} +#' \item{spotOrderLink}{string, A link to the json version of the spot order.} +#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} +#' \item{runs}{array, An array containing the following fields: +#' \itemize{ +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. 
+#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. +#' }} +#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} +#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} +#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} +#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} +#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -json_values_delete_shares_users <- function(id, user_id) { +media_patch_optimizations <- function(id, name = NULL, runs = NULL, programs = NULL, networks = NULL, exclude_programs = NULL, exclude_networks = NULL, time_slot_percentages = NULL) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/media/optimizations/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, runs = runs, programs = programs, networks = networks, excludePrograms = exclude_programs, excludeNetworks = exclude_networks, timeSlotPercentages = time_slot_percentages) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Clone an existing optimization +#' @param id integer required. The optimization ID. 
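A sketch of editing an existing optimization with media_patch_optimizations(); the ID, name, and program list are invented, and programs is supplied alongside exclude_programs because the documentation above states that one requires the other:

library(civis)

media_patch_optimizations(
  id = 555,
  name = "Q4 broadcast plan v2",
  programs = list("Late Night Movie"),  # hypothetical program name
  exclude_programs = TRUE               # exclude, rather than limit to, these programs
)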
#' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{id}{integer, The optimization ID.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{name}{string, The name of the optimization.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{reportLink}{string, A link to the visual report for the optimization.} +#' \item{spotOrderLink}{string, A link to the json version of the spot order.} +#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} +#' \item{runs}{array, An array containing the following fields: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item marketId integer, The market ID. +#' \item startDate string, The start date for the media run. +#' \item endDate string, The end date for the media run. +#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. +#' \item reachAlpha number, A tuning parameter used to adjust RF. +#' \item syscodes array, The syscodes for the media run. +#' \item rateCards array, The ratecards for the media run. +#' \item constraints array, The constraints for the media run. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} +#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} +#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} +#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} +#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -json_values_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_post_optimizations_clone <- function(id) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}/shares/groups" + path <- "/media/optimizations/{id}/clone" path_params <- list(id = id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' Start a run +#' @param id integer required. The ID of the optimization. 
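Cloning produces a new optimization that can be edited independently of the original; a minimal sketch with an invented ID:

library(civis)

copy <- media_post_optimizations_clone(id = 555)
copy$id  # the ID of the newly created optimization, per the return fields above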
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the run.} +#' \item{optimizationId}{integer, The ID of the optimization.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -json_values_delete_shares_groups <- function(id, group_id) { +media_post_optimizations_runs <- function(id) { args <- as.list(match.call())[-1] - path <- "/json_values/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) + path <- "/media/optimizations/{id}/runs" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' List runs for the given optimization +#' @param id integer required. The ID of the optimization. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{id}{integer, The ID of the run.} +#' \item{optimizationId}{integer, The ID of the optimization.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -match_targets_list_shares <- function(id) { +media_list_optimizations_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/shares" + path <- "/media/optimizations/{id}/runs" path_params <- list(id = id) - query_params <- list() + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -11245,60 +13030,48 @@ match_targets_list_shares <- function(id) { } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Check status of a run +#' @param id integer required. The ID of the optimization. +#' @param run_id integer required. The ID of the run. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' \item{id}{integer, The ID of the run.} +#' \item{optimizationId}{integer, The ID of the optimization.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' @export -match_targets_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_get_optimizations_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/shares/users" - path_params <- list(id = id) + path <- "/media/optimizations/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Cancel a run +#' @param id integer required. The ID of the optimization. +#' @param run_id integer required. The ID of the run. #' #' @return An empty HTTP response #' @export -match_targets_delete_shares_users <- function(id, user_id) { +media_delete_optimizations_runs <- function(id, run_id) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/media/optimizations/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -11311,116 +13084,104 @@ match_targets_delete_shares_users <- function(id, user_id) { } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get the logs for a run +#' @param id integer required. The ID of the optimization. +#' @param run_id integer required. The ID of the run. +#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. 
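A sketch of polling a run and cancelling it while it is still active (IDs invented; the state values come from the return documentation above):

library(civis)

run <- media_get_optimizations_runs(id = 555, run_id = 9001)
if (run$state %in% c("queued", "running")) {
  media_delete_optimizations_runs(id = 555, run_id = 9001)  # request cancellation
}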
#' -#' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} #' @export -match_targets_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_list_optimizations_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/shares/groups" - path_params <- list(id = id) - query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path <- "/media/optimizations/{id}/runs/{run_id}/logs" + path_params <- list(id = id, run_id = run_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' List all spot orders +#' @param id integer optional. The ID for the spot order. +#' @param archived string optional. The archival status of the requested item(s). #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for the spot order.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -match_targets_delete_shares_groups <- function(id, group_id) { +media_list_spot_orders <- function(id = NULL, archived = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) - query_params <- list() + path <- "/media/spot_orders" + path_params <- list() + query_params <- list(id = id, archived = archived) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. 
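A sketch of retrieving run logs and listing spot orders (IDs invented; the response is assumed to parse into a list of log entries with the fields documented above):

library(civis)

logs <- media_list_optimizations_runs_logs(id = 555, run_id = 9001, limit = 100)
logs[[1]]$message  # most recent log line, assuming at least one entry

orders <- media_list_spot_orders()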
-#' @param status boolean required. The desired archived status of the object. +#' Create a spot order +#' @param body string optional. CSV body of a spot order. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the match target} -#' \item{name}{string, The name of the match target} -#' \item{targetFileName}{string, The name of the target file} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{boolean, Whether the match target has been archived.} +#' \item{id}{integer, The ID for the spot order.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} +#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} +#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} +#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} #' @export -match_targets_put_archive <- function(id, status) { +media_post_spot_orders <- function(body = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}/archive" - path_params <- list(id = id) + path <- "/media/spot_orders" + path_params <- list() query_params <- list() - body_params <- list(status = status) + body_params <- list(body = body) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List match targets +#' Show a single spot order +#' @param id integer required. The ID for the spot order. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the match target} -#' \item{name}{string, The name of the match target} -#' \item{targetFileName}{string, The name of the target file} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{boolean, Whether the match target has been archived.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the spot order.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} +#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} +#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} +#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} #' @export -match_targets_list <- function() { +media_get_spot_orders <- function(id) { args <- as.list(match.call())[-1] - path <- "/match_targets/" - path_params <- list() + path <- "/media/spot_orders/{id}" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -11433,53 +13194,54 @@ match_targets_list <- function() { } -#' Create a new match target -#' @param name string required. The name of the match target -#' @param target_file_name string optional. The name of the target file -#' @param archived boolean optional. Whether the match target has been archived. +#' Edit the specified spot order +#' @param id integer required. The ID for the spot order. +#' @param body string optional. CSV body of a spot order. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the match target} -#' \item{name}{string, The name of the match target} -#' \item{targetFileName}{string, The name of the target file} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{boolean, Whether the match target has been archived.} +#' \item{id}{integer, The ID for the spot order.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} +#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} +#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} +#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} #' @export -match_targets_post <- function(name, target_file_name = NULL, archived = NULL) { +media_put_spot_orders <- function(id, body = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/" - path_params <- list() + path <- "/media/spot_orders/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, targetFileName = target_file_name, archived = archived) + body_params <- list(body = body) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Show Match Target info -#' @param id integer required. The ID of the match target +#' List all ratecards +#' @param archived string optional. The archival status of the requested item(s). +#' @param filename string optional. If specified, will be used to filter the ratecards returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "filename=\%ratecard\%" will return both "ratecard 1" and "my ratecard"). +#' @param dma_number integer optional. If specified, will be used to filter the ratecards by DMA. 
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the match target} -#' \item{name}{string, The name of the match target} -#' \item{targetFileName}{string, The name of the target file} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{boolean, Whether the match target has been archived.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -match_targets_get <- function(id) { +media_list_ratecards <- function(archived = NULL, filename = NULL, dma_number = NULL) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}" - path_params <- list(id = id) - query_params <- list() + path <- "/media/ratecards" + path_params <- list() + query_params <- list(archived = archived, filename = filename, dma_number = dma_number) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -11491,63 +13253,52 @@ match_targets_get <- function(id) { } -#' Update a match target -#' @param id integer required. The ID of the match target -#' @param name string optional. The name of the match target -#' @param target_file_name string optional. The name of the target file -#' @param archived boolean optional. Whether the match target has been archived. +#' Create a Ratecard +#' @param filename string required. Name of the ratecard file. +#' @param start_on string required. First day to which the ratecard applies. +#' @param end_on string required. Last day to which the ratecard applies. +#' @param dma_number integer required. Number of the DMA associated with the ratecard. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the match target} -#' \item{name}{string, The name of the match target} -#' \item{targetFileName}{string, The name of the target file} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{boolean, Whether the match target has been archived.} +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -match_targets_patch <- function(id, name = NULL, target_file_name = NULL, archived = NULL) { +media_post_ratecards <- function(filename, start_on, end_on, dma_number) { args <- as.list(match.call())[-1] - path <- "/match_targets/{id}" - path_params <- list(id = id) + path <- "/media/ratecards" + path_params <- list() query_params <- list() - body_params <- list(name = name, targetFileName = target_file_name, archived = archived) + body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Get a Ratecard +#' @param id integer required. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_list_spot_orders_shares <- function(id) { +media_get_ratecards <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/shares" + path <- "/media/ratecards/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -11561,39 +13312,28 @@ media_list_spot_orders_shares <- function(id) { } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. 
An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Replace all attributes of this Ratecard +#' @param id integer required. The ratecard ID. +#' @param filename string required. Name of the ratecard file. +#' @param start_on string required. First day to which the ratecard applies. +#' @param end_on string required. Last day to which the ratecard applies. +#' @param dma_number integer required. Number of the DMA associated with the ratecard. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_put_spot_orders_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_put_ratecards <- function(id, filename, start_on, end_on, dma_number) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/shares/users" + path <- "/media/ratecards/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -11604,152 +13344,205 @@ media_put_spot_orders_shares_users <- function(id, user_ids, permission_level, s } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Update some attributes of this Ratecard +#' @param id integer required. The ratecard ID. +#' @param filename string optional. Name of the ratecard file. +#' @param start_on string optional. First day to which the ratecard applies. +#' @param end_on string optional. Last day to which the ratecard applies. +#' @param dma_number integer optional. Number of the DMA associated with the ratecard. 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ratecard ID.} +#' \item{filename}{string, Name of the ratecard file.} +#' \item{startOn}{string, First day to which the ratecard applies.} +#' \item{endOn}{string, Last day to which the ratecard applies.} +#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_delete_spot_orders_shares_users <- function(id, user_id) { +media_patch_ratecards <- function(id, filename = NULL, start_on = NULL, end_on = NULL, dma_number = NULL) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/media/ratecards/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' List all Designated Market Areas +#' @param name string optional. If specified, will be used to filter the DMAs returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "name=\%region\%" will return both "region1" and "my region"). +#' @param number integer optional. If specified, will be used to filter the DMAS by number. #' -#' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' @return An array containing the following fields: +#' \item{name}{string, Name for the DMA region.} +#' \item{number}{integer, Identifier number for a DMA.} #' @export -media_put_spot_orders_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +media_list_dmas <- function(name = NULL, number = NULL) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/shares/groups" - path_params <- list(id = id) - query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path <- "/media/dmas" + path_params <- list() + query_params <- list(name = name, number = number) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' List all Media Targets +#' @param name string optional. The name of the target. +#' @param identifier string optional. A unique identifier for this target. +#' @param data_source string optional. The source of viewership data for this target. #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{name}{string, The name of the target.} +#' \item{identifier}{string, A unique identifier for this target.} +#' \item{dataSource}{string, The source of viewership data for this target.} #' @export -media_delete_spot_orders_shares_groups <- function(id, group_id) { +media_list_targets <- function(name = NULL, identifier = NULL, data_source = NULL) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) - query_params <- list() + path <- "/media/targets" + path_params <- list() + query_params <- list(name = name, identifier = identifier, data_source = data_source) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. 
+#' List all available model types #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the spot order.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} -#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} -#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} -#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the model type.} +#' \item{algorithm}{string, The name of the algorithm used to train the model.} +#' \item{dvType}{string, The type of dependent variable predicted by the model.} +#' \item{fintAllowed}{boolean, Whether this model type supports searching for interaction terms.} #' @export -media_put_spot_orders_archive <- function(id, status) { +models_list_types <- function() { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}/archive" - path_params <- list(id = id) + path <- "/models/types" + path_params <- list() query_params <- list() - body_params <- list(status = status) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' List +#' @param model_name string optional. If specified, will be used to filter the models returned. Substring matching is supported. (e.g., "modelName=model" will return both "model1" and "my model"). +#' @param training_table_name string optional. If specified, will be used to filter the models returned by the training dataset table name. Substring matching is supported. (e.g., "trainingTableName=table" will return both "table1" and "my_table"). +#' @param dependent_variable string optional. If specified, will be used to filter the models returned by the dependent variable column name. Substring matching is supported. (e.g., "dependentVariable=predictor" will return both "predictor" and "my predictor"). +#' @param status string optional. If specified, returns models with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). +#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
#' #' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID of the model.} +#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} +#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} +#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} +#' \item{modelName}{string, The name of the model.} +#' \item{description}{string, A description of the model.} +#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} +#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. Valid only with continuous models.} +#' \item{modelTypeId}{integer, The ID of the model's type.} +#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} +#' \item{dependentVariable}{string, The dependent variable of the training dataset.} +#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} +#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} +#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} +#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} +#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} +#' \item{timeZone}{string, The time zone of this model.} +#' \item{lastRun}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{user}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
#' }}
-#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.}
-#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.}
+#' \item{createdAt}{string, The time the model was created.}
+#' \item{updatedAt}{string, The time the model was updated.}
+#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", "running", or "idle", if no build has been attempted.}
+#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.}
+#' \item{builds}{array, An array containing the following fields:
+#' \itemize{
+#' \item id integer, The ID of the model build.
+#' \item name string, The name of the model build.
+#' \item createdAt string, The time the model build was created.
+#' \item description string, A description of the model build.
+#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types.
+#' \item rSquaredError number, A key metric for continuous models. Nil for other model types.
+#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types.
+#' }}
+#' \item{predictions}{array, An array containing the following fields:
+#' \itemize{
+#' \item id integer, The ID of the model to which to apply the prediction.
+#' \item tableName string, The qualified name of the table on which to apply the predictive model.
+#' \item primaryKey array, The primary key or composite keys of the table being predicted.
+#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted.
+#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions.
+#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", "running", or "idle", if no build has been attempted.
+#' }}
+#' \item{lastOutputLocation}{string, The output JSON for the last build.}
+#' \item{archived}{string, The archival status of the requested item(s).}
 #' @export
-media_list_optimizations_shares <- function(id) {
+models_list <- function(model_name = NULL, training_table_name = NULL, dependent_variable = NULL, status = NULL, author = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) {
 
   args <- as.list(match.call())[-1]
-  path <- "/media/optimizations/{id}/shares"
-  path_params <- list(id = id)
-  query_params <- list()
+  path <- "/models/"
+  path_params <- list()
+  query_params <- list(model_name = model_name, training_table_name = training_table_name, dependent_variable = dependent_variable, status = status, author = author, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir)
   body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
@@ -11761,191 +13554,240 @@ media_list_optimizations_shares <- function(id) {
 }
 
 
-#' Set the permissions users have on this object
-#' @param id integer required. The ID of the resource that is shared.
-#' @param user_ids array required. An array of one or more user IDs.
-#' @param permission_level string required. Options are: "read", "write", or "manage".
-#' @param share_email_body string optional. Custom body text for e-mail sent on a share. 
-#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Retrieve model configuration +#' @param id integer required. The ID of the model. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: +#' \item{id}{integer, The ID of the model.} +#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} +#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} +#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} +#' \item{modelName}{string, The name of the model.} +#' \item{description}{string, A description of the model.} +#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} +#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. Valid only with continuous models.} +#' \item{modelTypeId}{integer, The ID of the model's type.} +#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} +#' \item{dependentVariable}{string, The dependent variable of the training dataset.} +#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} +#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} +#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} +#' \item{activeBuildId}{integer, The ID of the current active build, the build used to score predictions.} +#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} +#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} +#' \item{notifications}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. #' }} -#' \item{writers}{list, A list containing the following elements: +#' \item{schedule}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. 
+#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} +#' \item{runningAs}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{timeZone}{string, The time zone of this model.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, The time the model was created.} +#' \item{updatedAt}{string, The time the model was updated.} +#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.} +#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.} +#' \item{builds}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of the model build. +#' \item name string, The name of the model build. +#' \item createdAt string, The time the model build was created. +#' \item description string, A description of the model build. +#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types. +#' \item rSquaredError number, A key metric for continuous models. Nil for other model types. +#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types. +#' }} +#' \item{predictions}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of the model to which to apply the prediction. +#' \item tableName string, The qualified name of the table on which to apply the predictive model. +#' \item primaryKey array, The primary key or composite keys of the table being predicted. +#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted. +#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions. 
+#' \item schedule object,
+#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", "running", or "idle", if no build has been attempted.
+#' }}
+#' \item{lastOutputLocation}{string, The output JSON for the last build.}
+#' \item{archived}{string, The archival status of the requested item(s).}
 #' @export
-media_put_optimizations_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) {
+models_get <- function(id) {
 
   args <- as.list(match.call())[-1]
-  path <- "/media/optimizations/{id}/shares/users"
+  path <- "/models/{id}"
   path_params <- list(id = id)
   query_params <- list()
-  body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email)
+  body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
   body_params <- body_params[match_params(body_params, args)]
 
-  resp <- call_api("PUT", path, path_params, query_params, body_params)
+  resp <- call_api("GET", path, path_params, query_params, body_params)
 
   return(resp)
 
 }
 
 
-#' Revoke the permissions a user has on this object
-#' @param id integer required. The ID of the resource that is shared.
-#' @param user_id integer required. The ID of the user.
+#' Check status of a build
+#' @param id integer required. The ID of the model.
+#' @param build_id integer required. The ID of the build.
 #'
-#' @return An empty HTTP response
+#' @return A list containing the following elements:
+#' \item{id}{integer, The ID of the model build.}
+#' \item{state}{string, The state of the model build, one of 'queued', 'running', 'succeeded', 'failed', or 'cancelled'.}
+#' \item{error}{string, The error, if any, returned by the build.}
+#' \item{name}{string, The name of the model build.}
+#' \item{createdAt}{string, The time the model build was created.}
+#' \item{description}{string, A description of the model build.}
+#' \item{rootMeanSquaredError}{number, A key metric for continuous models. Nil for other model types.}
+#' \item{rSquaredError}{number, A key metric for continuous models. Nil for other model types.}
+#' \item{rocAuc}{number, A key metric for binary, multinomial, and ordinal models. Nil for other model types.}
+#' \item{transformationMetadata}{string, A string representing the full JSON output of the metadata for transformation of column names}
+#' \item{output}{string, A string representing the JSON output for the specified build. 
Only present when smaller than 10KB in size.} +#' \item{outputLocation}{string, A URL representing the location of the full JSON output for the specified build.The URL link will be valid for 5 minutes.} #' @export -media_delete_optimizations_shares_users <- function(id, user_id) { +models_get_builds <- function(id, build_id) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/models/{id}/builds/{build_id}" + path_params <- list(id = id, build_id = build_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Cancel a build +#' @param id integer required. The ID of the model. +#' @param build_id integer required. The ID of the build. #' -#' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @return An empty HTTP response #' @export -media_put_optimizations_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +models_delete_builds <- function(id, build_id) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/shares/groups" - path_params <- list(id = id) + path <- "/models/{id}/builds/{build_id}" + path_params <- list(id = id, build_id = build_id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' List builds for the given model +#' @param id integer required. 
The ID of the model.
+#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100.
+#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1.
+#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id.
+#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.
 #'
-#' @return An empty HTTP response
+#' @return An array containing the following fields:
+#' \item{id}{integer, The ID of the model build.}
+#' \item{state}{string, The state of the model build, one of 'queued', 'running', 'succeeded', 'failed', or 'cancelled'.}
+#' \item{error}{string, The error, if any, returned by the build.}
+#' \item{name}{string, The name of the model build.}
+#' \item{createdAt}{string, The time the model build was created.}
+#' \item{description}{string, A description of the model build.}
+#' \item{rootMeanSquaredError}{number, A key metric for continuous models. Nil for other model types.}
+#' \item{rSquaredError}{number, A key metric for continuous models. Nil for other model types.}
+#' \item{rocAuc}{number, A key metric for binary, multinomial, and ordinal models. Nil for other model types.}
+#' \item{transformationMetadata}{string, A string representing the full JSON output of the metadata for transformation of column names}
+#' \item{output}{string, A string representing the JSON output for the specified build. Only present when smaller than 10KB in size.}
+#' \item{outputLocation}{string, A URL representing the location of the full JSON output for the specified build. The URL link will be valid for 5 minutes.}
 #' @export
-media_delete_optimizations_shares_groups <- function(id, group_id) {
+models_list_builds <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) {
 
   args <- as.list(match.call())[-1]
-  path <- "/media/optimizations/{id}/shares/groups/{group_id}"
-  path_params <- list(id = id, group_id = group_id)
-  query_params <- list()
+  path <- "/models/{id}/builds"
+  path_params <- list(id = id)
+  query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir)
   body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
   body_params <- body_params[match_params(body_params, args)]
 
-  resp <- call_api("DELETE", path, path_params, query_params, body_params)
+  resp <- call_api("GET", path, path_params, query_params, body_params)
 
   return(resp)
 
 }
 
 
-#' Update the archive status of this object
-#' @param id integer required. The ID of the object.
-#' @param status boolean required. The desired archived status of the object.
+#' Get the logs for a build
+#' @param id integer required. The ID of the model.
+#' @param build_id integer required. The ID of the build.
+#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted. Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt.
+#' @param limit integer optional. The maximum number of log messages to return. Default of 10000.
 #'
-#' @return A list containing the following elements:
-#' \item{id}{integer, The optimization ID.}
-#' \item{author}{list, A list containing the following elements:
-#' \itemize{
-#' \item id integer, The ID of this user.
-#' \item name string, This user's name. 
-#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of the optimization.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{reportLink}{string, A link to the visual report for the optimization.} -#' \item{spotOrderLink}{string, A link to the json version of the spot order.} -#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} -#' \item{runs}{array, An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. -#' }} -#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} -#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} -#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} -#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} -#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of the log.} +#' \item{createdAt}{string, The time the log was created.} +#' \item{message}{string, The log message.} +#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} #' @export -media_put_optimizations_archive <- function(id, status) { +models_list_builds_logs <- function(id, build_id, last_id = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/archive" - path_params <- list(id = id) - query_params <- list() - body_params <- list(status = status) + path <- "/models/{id}/builds/{build_id}/logs" + path_params <- list(id = id, build_id = build_id) + query_params <- list(last_id = last_id, limit = limit) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) @@ -11974,10 +13816,10 @@ media_put_optimizations_archive <- function(id, status) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -media_list_ratecards_shares <- function(id) { +models_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/shares" + path <- "/models/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -12017,10 +13859,10 @@ media_list_ratecards_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -media_put_ratecards_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +models_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/shares/users" + path <- "/models/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -12040,10 +13882,10 @@ media_put_ratecards_shares_users <- function(id, user_ids, permission_level, sha #' #' @return An empty HTTP response #' @export -media_delete_ratecards_shares_users <- function(id, user_id) { +models_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/shares/users/{user_id}" + path <- "/models/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -12083,10 +13925,10 @@ media_delete_ratecards_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -media_put_ratecards_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +models_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/shares/groups" + path <- "/models/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -12106,10 +13948,10 @@ media_put_ratecards_shares_groups <- function(id, group_ids, permission_level, s #' #' @return An empty HTTP response #' @export -media_delete_ratecards_shares_groups <- function(id, group_id) { +models_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/shares/groups/{group_id}" + path <- "/models/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -12123,99 +13965,76 @@ media_delete_ratecards_shares_groups <- function(id, group_id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -media_put_ratecards_archive <- function(id, status) { +models_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}/archive" + path <- "/models/{id}/dependencies" path_params <- list(id = id) - query_params <- list() - body_params <- list(status = status) + query_params <- list(user_id = user_id) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List all optimizations -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. 
-#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, author, name. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' -#' @return An array containing the following fields: -#' \item{id}{integer, The optimization ID.} -#' \item{author}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user #' }} -#' \item{name}{string, The name of the optimization.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_list_optimizations <- function(archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +models_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/media/optimizations" - path_params <- list() - query_params <- list(archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() + path <- "/models/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Create a new optimization -#' @param runs array required. An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. 
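The transfer endpoint just defined can be exercised along the same lines; a minimal sketch, assuming placeholder IDs and that the caller holds manage rights on the model.

# assumes library(civis) is attached and CIVIS_API_KEY is set
transfer <- models_put_transfer(id = 123, user_id = 456,
                                include_dependencies = TRUE,
                                send_email = FALSE)
transfer$dependencies   # per-dependency share results returned by the API
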
-#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. -#' } -#' @param name string optional. The name of the optimization. -#' @param programs array optional. An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set. -#' @param networks array optional. An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set. -#' @param exclude_programs boolean optional. If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set. -#' @param exclude_networks boolean optional. If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set. -#' @param time_slot_percentages list optional. The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable. +#' List the projects a Model belongs to +#' @param id integer required. The ID of the Model. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The optimization ID.} +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -12224,190 +14043,130 @@ media_list_optimizations <- function(archived = NULL, limit = NULL, page_num = N #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{name}{string, The name of the optimization.} +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
+#' }} +#' \item{autoShare}{boolean, } #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{reportLink}{string, A link to the visual report for the optimization.} -#' \item{spotOrderLink}{string, A link to the json version of the spot order.} -#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} -#' \item{runs}{array, An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. -#' }} -#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} -#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} -#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} -#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} -#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} #' @export -media_post_optimizations <- function(runs, name = NULL, programs = NULL, networks = NULL, exclude_programs = NULL, exclude_networks = NULL, time_slot_percentages = NULL) { +models_list_projects <- function(id, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/media/optimizations" - path_params <- list() - query_params <- list() - body_params <- list(runs = runs, name = name, programs = programs, networks = networks, excludePrograms = exclude_programs, excludeNetworks = exclude_networks, timeSlotPercentages = time_slot_percentages) + path <- "/models/{id}/projects" + path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Show a single optimization -#' @param id integer required. The optimization ID. +#' Add a Model to a project +#' @param id integer required. 
The ID of the Model. +#' @param project_id integer required. The ID of the project. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The optimization ID.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of the optimization.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{reportLink}{string, A link to the visual report for the optimization.} -#' \item{spotOrderLink}{string, A link to the json version of the spot order.} -#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} -#' \item{runs}{array, An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. 
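A minimal sketch of the project endpoints defined above, with placeholder IDs (the corresponding removal endpoint is defined just below).

# assumes library(civis) is attached and CIVIS_API_KEY is set
model_id   <- 123   # placeholder
project_id <- 999   # placeholder

models_put_projects(model_id, project_id)    # add the model to a project
projects <- models_list_projects(model_id)   # confirm membership
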
-#' }} -#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} -#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} -#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} -#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} -#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} +#' @return An empty HTTP response #' @export -media_get_optimizations <- function(id) { +models_put_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}" - path_params <- list(id = id) + path <- "/models/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Edit an existing optimization -#' @param id integer required. The optimization ID. -#' @param name string optional. The name of the optimization. -#' @param runs array optional. An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. -#' } -#' @param programs array optional. An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set. -#' @param networks array optional. An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set. -#' @param exclude_programs boolean optional. If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set. -#' @param exclude_networks boolean optional. 
If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set. -#' @param time_slot_percentages list optional. The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable. +#' Remove a Model from a project +#' @param id integer required. The ID of the Model. +#' @param project_id integer required. The ID of the project. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The optimization ID.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of the optimization.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{reportLink}{string, A link to the visual report for the optimization.} -#' \item{spotOrderLink}{string, A link to the json version of the spot order.} -#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} -#' \item{runs}{array, An array containing the following fields: -#' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. 
-#' }} -#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} -#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} -#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} -#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} -#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} +#' @return An empty HTTP response #' @export -media_patch_optimizations <- function(id, name = NULL, runs = NULL, programs = NULL, networks = NULL, exclude_programs = NULL, exclude_networks = NULL, time_slot_percentages = NULL) { +models_delete_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}" - path_params <- list(id = id) + path <- "/models/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() - body_params <- list(name = name, runs = runs, programs = programs, networks = networks, excludePrograms = exclude_programs, excludeNetworks = exclude_networks, timeSlotPercentages = time_slot_percentages) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Clone an existing optimization -#' @param id integer required. The optimization ID. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' #' @return A list containing the following elements: -#' \item{id}{integer, The optimization ID.} -#' \item{author}{list, A list containing the following elements: +#' \item{id}{integer, The ID of the model.} +#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} +#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} +#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} +#' \item{modelName}{string, The name of the model.} +#' \item{description}{string, A description of the model.} +#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} +#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. 
Valid only with continuous models.} +#' \item{modelTypeId}{integer, The ID of the model's type.} +#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} +#' \item{dependentVariable}{string, The dependent variable of the training dataset.} +#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} +#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} +#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} +#' \item{activeBuildId}{integer, The ID of the current active build, the build used to score predictions.} +#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} +#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} +#' \item{runningAs}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -12415,102 +14174,153 @@ media_patch_optimizations <- function(id, name = NULL, runs = NULL, programs = N #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. 
#' }} -#' \item{name}{string, The name of the optimization.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} -#' \item{lastRunId}{integer, The ID of the last run.} -#' \item{spotOrderId}{integer, The ID for the spot order produced by the optimization.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{reportLink}{string, A link to the visual report for the optimization.} -#' \item{spotOrderLink}{string, A link to the json version of the spot order.} -#' \item{fileLinks}{array, Links to the csv and xml versions of the spot order.} -#' \item{runs}{array, An array containing the following fields: +#' \item{timeZone}{string, The time zone of this model.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, The time the model was created.} +#' \item{updatedAt}{string, The time the model was updated.} +#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.} +#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.} +#' \item{builds}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of the model build. +#' \item name string, The name of the model build. +#' \item createdAt string, The time the model build was created. +#' \item description string, A description of the model build. +#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types. +#' \item rSquaredError number, A key metric for continuous models. Nil for other model types. +#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types. +#' }} +#' \item{predictions}{array, An array containing the following fields: #' \itemize{ -#' \item marketId integer, The market ID. -#' \item startDate string, The start date for the media run. -#' \item endDate string, The end date for the media run. -#' \item forceCpm boolean, Whether to force optimization to use CPM data even if partition data is available. -#' \item reachAlpha number, A tuning parameter used to adjust RF. -#' \item syscodes array, The syscodes for the media run. -#' \item rateCards array, The ratecards for the media run. -#' \item constraints array, The constraints for the media run. +#' \item id integer, The ID of the model to which to apply the prediction. +#' \item tableName string, The qualified name of the table on which to apply the predictive model. +#' \item primaryKey array, The primary key or composite keys of the table being predicted. 
+#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted. +#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions. +#' \item schedule object, +#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted. #' }} -#' \item{programs}{array, An array of programs that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_programs is not also set.} -#' \item{networks}{array, An array of networks that the Civis Media Optimizer either exclude or limit to.An error will be thrown if exclude_networks is not also set.} -#' \item{excludePrograms}{boolean, If Civis Media Optimizer should exclude the programs in the programs parameter.If this value is set to false, it will make the optimization limit itself to the programs supplied through the programs parameter.An error will be thrown if programs is not also set.} -#' \item{excludeNetworks}{boolean, If Civis Media Optimizer should exclude the networks in the networks parameter.If this value is set to false, it will make the optimization limit itself to the networks supplied through the networks.An error will be thrown if networks is not also set.} -#' \item{timeSlotPercentages}{list, The maximum amount of the budget spent on that particular day of the week, daypart, or specific time slot for broadcast and cable.} +#' \item{lastOutputLocation}{string, The output JSON for the last build.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_post_optimizations_clone <- function(id) { +models_put_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/clone" + path <- "/models/{id}/archive" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Start a run -#' @param id integer required. The ID of the optimization. +#' Show the model build schedule +#' @param id integer required. The ID of the model associated with this schedule. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{optimizationId}{integer, The ID of the optimization.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, The ID of the model associated with this schedule.} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. 
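The archive endpoint defined above takes a boolean status; a minimal sketch with a placeholder model ID.

# assumes library(civis) is attached and CIVIS_API_KEY is set
archived <- models_put_archive(123, status = TRUE)    # archive the model
restored <- models_put_archive(123, status = FALSE)   # unarchive it again
restored$archived                                     # archival status string
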
+#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} #' @export -media_post_optimizations_runs <- function(id) { +models_list_schedules <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/runs" + path <- "/models/{id}/schedules" path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List runs for the given optimization -#' @param id integer required. The ID of the optimization. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. +#' List Notebooks +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. +#' @param status string optional. If specified, returns notebooks with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'pending', 'idle'. +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of the run.} -#' \item{optimizationId}{integer, The ID of the optimization.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. 
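The build-schedule lookup just above returns the schedule block shown; a short sketch with a placeholder ID.

# assumes library(civis) is attached and CIVIS_API_KEY is set
sched <- models_list_schedules(123)   # placeholder model ID
sched$schedule$scheduled              # TRUE if a rebuild is scheduled
sched$schedule$scheduledDays          # days of week (0 = Sunday)
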
+#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. +#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. +#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -media_list_optimizations_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +notebooks_list <- function(hidden = NULL, archived = NULL, author = NULL, status = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/runs" - path_params <- list(id = id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/notebooks/" + path_params <- list() + query_params <- list(hidden = hidden, archived = archived, author = author, status = status, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -12522,102 +14332,167 @@ media_list_optimizations_runs <- function(id, limit = NULL, page_num = NULL, ord } -#' Check status of a run -#' @param id integer required. The ID of the optimization. -#' @param run_id integer required. The ID of the run. +#' Create a Notebook +#' @param name string optional. The name of this notebook. +#' @param language string optional. The kernel language of this notebook. +#' @param description string optional. The description of this notebook. +#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. +#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. +#' @param requirements string optional. The requirements txt file. +#' @param docker_image_name string optional. The name of the docker image to pull from DockerHub. +#' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub (default: latest). +#' @param instance_type string optional. The EC2 instance type to deploy to. +#' @param memory integer optional. The amount of memory allocated to the notebook. +#' @param cpu integer optional. The amount of cpu allocated to the the notebook. +#' @param credentials array optional. A list of credential IDs to pass to the notebook. +#' @param environment_variables list optional. Environment variables to be passed into the Notebook. +#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. +#' @param partition_label string optional. 
The partition label used to run this object. +#' @param git_repo_url string optional. The url of the git repository +#' @param git_ref string optional. The git reference if git repo is specified +#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch +#' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the run.} -#' \item{optimizationId}{integer, The ID of the optimization.} -#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} -#' \item{error}{string, The error, if any, returned by the run.} -#' @export -media_get_optimizations_runs <- function(id, run_id) { - - args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Cancel a run -#' @param id integer required. The ID of the optimization. -#' @param run_id integer required. The ID of the run. -#' -#' @return An empty HTTP response +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. 
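For the notebook listing endpoint defined above, a hedged usage sketch; the filter values come from the documented options and the result handling is illustrative.

# assumes library(civis) is attached and CIVIS_API_KEY is set
running <- notebooks_list(status = "running", limit = 50,
                          order = "created_at", order_dir = "desc")
length(running)   # number of matching notebooks returned on this page
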
+#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. +#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -media_delete_optimizations_runs <- function(id, run_id) { +notebooks_post <- function(name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL, hidden = NULL) { args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/runs/{run_id}" - path_params <- list(id = id, run_id = run_id) + path <- "/notebooks/" + path_params <- list() query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Get the logs for a run -#' @param id integer required. The ID of the optimization. -#' @param run_id integer required. The ID of the run. -#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. -#' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} -#' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} -#' @export -media_list_optimizations_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { - - args <- as.list(match.call())[-1] - path <- "/media/optimizations/{id}/runs/{run_id}/logs" - path_params <- list(id = id, run_id = run_id) - query_params <- list(last_id = last_id, limit = limit) - body_params <- list() + body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, partitionLabel = partition_label, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List all spot orders -#' @param id integer optional. The ID for the spot order. -#' @param archived string optional. The archival status of the requested item(s). +#' Get a Notebook +#' @param id integer required. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for the spot order.} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. 
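A sketch of creating a notebook with notebooks_post; every argument is optional per the signature above, and the kernel language and resource values are assumptions rather than documented defaults.

# assumes library(civis) is attached and CIVIS_API_KEY is set
nb <- notebooks_post(
  name     = "exploratory-analysis",   # placeholder name
  language = "R",                      # kernel language (assumed value)
  memory   = 4096,                     # memory allocation (illustrative)
  cpu      = 1000                      # cpu allocation (illustrative)
)
nb$id   # ID of the newly created notebook
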
+#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. +#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -media_list_spot_orders <- function(id = NULL, archived = NULL) { +notebooks_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders" - path_params <- list() - query_params <- list(id = id, archived = archived) + path <- "/notebooks/{id}" + path_params <- list(id = id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -12629,110 +14504,231 @@ media_list_spot_orders <- function(id = NULL, archived = NULL) { } -#' Create a spot order -#' @param body string optional. CSV body of a spot order. +#' Replace all attributes of this Notebook +#' @param id integer required. The ID for this notebook. +#' @param name string optional. The name of this notebook. +#' @param language string optional. The kernel language of this notebook. +#' @param description string optional. The description of this notebook. +#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. +#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. +#' @param requirements string optional. The requirements txt file. +#' @param docker_image_name string optional. The name of the docker image to pull from DockerHub. +#' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub (default: latest). +#' @param instance_type string optional. The EC2 instance type to deploy to. +#' @param memory integer optional. The amount of memory allocated to the notebook. +#' @param cpu integer optional. The amount of cpu allocated to the the notebook. +#' @param credentials array optional. 
A list of credential IDs to pass to the notebook. +#' @param environment_variables list optional. Environment variables to be passed into the Notebook. +#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. +#' @param partition_label string optional. The partition label used to run this object. +#' @param git_repo_url string optional. The url of the git repository +#' @param git_ref string optional. The git reference if git repo is specified +#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the spot order.} +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. +#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. 
+#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} -#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} -#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} -#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -media_post_spot_orders <- function(body = NULL) { +notebooks_put <- function(id, name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders" - path_params <- list() + path <- "/notebooks/{id}" + path_params <- list(id = id) query_params <- list() - body_params <- list(body = body) + body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, partitionLabel = partition_label, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Show a single spot order -#' @param id integer required. The ID for the spot order. +#' Update some attributes of this Notebook +#' @param id integer required. The ID for this notebook. +#' @param name string optional. The name of this notebook. +#' @param language string optional. The kernel language of this notebook. +#' @param description string optional. The description of this notebook. +#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. +#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. +#' @param requirements string optional. The requirements txt file. +#' @param docker_image_name string optional. 
The name of the docker image to pull from DockerHub. +#' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub (default: latest). +#' @param instance_type string optional. The EC2 instance type to deploy to. +#' @param memory integer optional. The amount of memory allocated to the notebook. +#' @param cpu integer optional. The amount of cpu allocated to the the notebook. +#' @param credentials array optional. A list of credential IDs to pass to the notebook. +#' @param environment_variables list optional. Environment variables to be passed into the Notebook. +#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. +#' @param partition_label string optional. The partition label used to run this object. +#' @param git_repo_url string optional. The url of the git repository +#' @param git_ref string optional. The git reference if git repo is specified +#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for the spot order.} +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. +#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. 
+#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. +#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} -#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} -#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} -#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -media_get_spot_orders <- function(id) { +notebooks_patch <- function(id, name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}" + path <- "/notebooks/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, partitionLabel = partition_label, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Edit the specified spot order -#' @param id integer required. The ID for the spot order. -#' @param body string optional. CSV body of a spot order. +#' Archive a Notebook (deprecated, use archiving endpoints instead) +#' @param id integer required. 
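# Editorial usage sketch (not part of the generated client); the notebook ID and
# attribute values below are placeholders. notebooks_patch() updates only the
# attributes that are supplied, leaving the rest of the notebook unchanged.
library(civis)
nb <- notebooks_patch(id = 123, name = "weekly-report", memory = 8192)
nb$dockerImageName  # response fields come back camelCase, e.g. dockerImageName, idleTimeout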
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the spot order.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{csvS3Uri}{string, S3 URI for the spot order CSV file.} -#' \item{jsonS3Uri}{string, S3 URI for the spot order JSON file.} -#' \item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.} -#' \item{lastTransformJobId}{integer, ID of the spot order transformation job.} +#' @return An empty HTTP response #' @export -media_put_spot_orders <- function(id, body = NULL) { +notebooks_delete <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/spot_orders/{id}" + path <- "/notebooks/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(body = body) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' List all ratecards -#' @param archived string optional. The archival status of the requested item(s). -#' @param filename string optional. If specified, will be used to filter the ratecards returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "filename=\%ratecard\%" will return both "ratecard 1" and "my ratecard"). -#' @param dma_number integer optional. If specified, will be used to filter the ratecards by DMA. +#' Get URLs to update notebook +#' @param id integer required. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return A list containing the following elements: +#' \item{updateUrl}{string, Time-limited URL to PUT new contents of the .ipynb file for this notebook.} +#' \item{updatePreviewUrl}{string, Time-limited URL to PUT new contents of the .htm preview file for this notebook.} #' @export -media_list_ratecards <- function(archived = NULL, filename = NULL, dma_number = NULL) { +notebooks_list_update_links <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards" - path_params <- list() - query_params <- list(archived = archived, filename = filename, dma_number = dma_number) + path <- "/notebooks/{id}/update-links" + path_params <- list(id = id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -12744,27 +14740,73 @@ media_list_ratecards <- function(archived = NULL, filename = NULL, dma_number = } -#' Create a Ratecard -#' @param filename string required. Name of the ratecard file. -#' @param start_on string required. First day to which the ratecard applies. -#' @param end_on string required. Last day to which the ratecard applies. -#' @param dma_number integer required. Number of the DMA associated with the ratecard. +#' Clone this Notebook +#' @param id integer required. 
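# Editorial sketch: push new .ipynb contents through the time-limited update URL
# returned by notebooks_list_update_links(). Assumes the httr package is available;
# the notebook ID and local file name are placeholders.
links <- notebooks_list_update_links(id = 123)
httr::PUT(links$updateUrl, body = httr::upload_file("analysis.ipynb"))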
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: +#' \itemize{ +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. +#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. +#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. 
+#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook +#' }} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -media_post_ratecards <- function(filename, start_on, end_on, dma_number) { +notebooks_post_clone <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards" - path_params <- list() + path <- "/notebooks/{id}/clone" + path_params <- list(id = id) query_params <- list() - body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -12775,21 +14817,32 @@ media_post_ratecards <- function(filename, start_on, end_on, dma_number) { } -#' Get a Ratecard -#' @param id integer required. +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -media_get_ratecards <- function(id) { +notebooks_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}" + path <- "/notebooks/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -12803,28 +14856,39 @@ media_get_ratecards <- function(id) { } -#' Replace all attributes of this Ratecard -#' @param id integer required. The ratecard ID. 
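# Editorial sketch: clone an existing notebook and inspect the permissions on the
# copy. The source notebook ID is a placeholder.
copy <- notebooks_post_clone(id = 123)
notebooks_list_shares(id = copy$id)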
-#' @param filename string required. Name of the ratecard file. -#' @param start_on string required. First day to which the ratecard applies. -#' @param end_on string required. Last day to which the ratecard applies. -#' @param dma_number integer required. Number of the DMA associated with the ratecard. +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -media_put_ratecards <- function(id, filename, start_on, end_on, dma_number) { +notebooks_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}" + path <- "/notebooks/{id}/shares/users" path_params <- list(id = id) query_params <- list() - body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -12835,79 +14899,113 @@ media_put_ratecards <- function(id, filename, start_on, end_on, dma_number) { } -#' Update some attributes of this Ratecard -#' @param id integer required. The ratecard ID. -#' @param filename string optional. Name of the ratecard file. -#' @param start_on string optional. First day to which the ratecard applies. -#' @param end_on string optional. Last day to which the ratecard applies. -#' @param dma_number integer optional. Number of the DMA associated with the ratecard. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. 
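# Editorial sketch: grant two users read access without sending a notification
# e-mail. The notebook and user IDs are placeholders.
notebooks_put_shares_users(id = 123, user_ids = c(456, 789),
                           permission_level = "read", send_shared_email = FALSE)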
+#' +#' @return An empty HTTP response +#' @export +notebooks_delete_shares_users <- function(id, user_id) { + + args <- as.list(match.call())[-1] + path <- "/notebooks/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ratecard ID.} -#' \item{filename}{string, Name of the ratecard file.} -#' \item{startOn}{string, First day to which the ratecard applies.} -#' \item{endOn}{string, Last day to which the ratecard applies.} -#' \item{dmaNumber}{integer, Number of the DMA associated with the ratecard.} -#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -media_patch_ratecards <- function(id, filename = NULL, start_on = NULL, end_on = NULL, dma_number = NULL) { +notebooks_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/media/ratecards/{id}" + path <- "/notebooks/{id}/shares/groups" path_params <- list(id = id) query_params <- list() - body_params <- list(filename = filename, startOn = start_on, endOn = end_on, dmaNumber = dma_number) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PATCH", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List all Designated Market Areas -#' @param name string optional. If specified, will be used to filter the DMAs returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "name=\%region\%" will return both "region1" and "my region"). -#' @param number integer optional. If specified, will be used to filter the DMAS by number. 
+#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. #' -#' @return An array containing the following fields: -#' \item{name}{string, Name for the DMA region.} -#' \item{number}{integer, Identifier number for a DMA.} +#' @return An empty HTTP response #' @export -media_list_dmas <- function(name = NULL, number = NULL) { +notebooks_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/media/dmas" - path_params <- list() - query_params <- list(name = name, number = number) + path <- "/notebooks/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' List all Media Targets -#' @param name string optional. The name of the target. -#' @param identifier string optional. A unique identifier for this target. -#' @param data_source string optional. The source of viewership data for this target. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user #' #' @return An array containing the following fields: -#' \item{name}{string, The name of the target.} -#' \item{identifier}{string, A unique identifier for this target.} -#' \item{dataSource}{string, The source of viewership data for this target.} +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -media_list_targets <- function(name = NULL, identifier = NULL, data_source = NULL) { +notebooks_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/media/targets" - path_params <- list() - query_params <- list(name = name, identifier = identifier, data_source = data_source) + path <- "/notebooks/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -12919,80 +15017,55 @@ media_list_targets <- function(name = NULL, identifier = NULL, data_source = NUL } -#' List all available model types +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
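# Editorial sketch: before transferring ownership, list the dependent objects the
# prospective new owner would need access to. All IDs are placeholders.
deps <- notebooks_list_dependencies(id = 123, user_id = 456)
notebooks_put_shares_groups(id = 123, group_ids = c(42, 57), permission_level = "write")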
#' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the model type.} -#' \item{algorithm}{string, The name of the algorithm used to train the model.} -#' \item{dvType}{string, The type of dependent variable predicted by the model.} -#' \item{fintAllowed}{boolean, Whether this model type supports searching for interaction terms.} +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -models_list_types <- function() { +notebooks_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/models/types" - path_params <- list() + path <- "/notebooks/{id}/transfer" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' List -#' @param model_name string optional. If specified, will be used to filter the models returned. Substring matching is supported. (e.g., "modelName=model" will return both "model1" and "my model"). -#' @param training_table_name string optional. If specified, will be used to filter the models returned by the training dataset table name. Substring matching is supported. (e.g., "trainingTableName=table" will return both "table1" and "my_table"). -#' @param dependent_variable string optional. If specified, will be used to filter the models returned by the dependent variable column name. Substring matching is supported. (e.g., "dependentVariable=predictor" will return both "predictor" and "my predictor"). -#' @param author string optional. If specified, return models from this author. It accepts a comma-separated list of author ids. -#' @param status string optional. If specified, returns models with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. -#' @param archived string optional. The archival status of the requested item(s). -#' @param limit integer optional. Number of results to return. Defaults to its maximum of 50. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at. -#' @param order_dir string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the model.} -#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} -#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} -#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} -#' \item{modelName}{string, The name of the model.} -#' \item{description}{string, A description of the model.} -#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} -#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. Valid only with continuous models.} -#' \item{modelTypeId}{integer, The ID of the model's type.} -#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} -#' \item{dependentVariable}{string, The dependent variable of the training dataset.} -#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} -#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} -#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} -#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} -#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} -#' \item{timeZone}{string, The time zone of this model.} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. 
-#' }} +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this notebook.} +#' \item{name}{string, The name of this notebook.} +#' \item{language}{string, The kernel language of this notebook.} +#' \item{description}{string, The description of this notebook.} +#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} +#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} +#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} +#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} +#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} #' \item{user}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -13001,93 +15074,69 @@ models_list_types <- function() { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{createdAt}{string, The time the model was created.} -#' \item{updatedAt}{string, The time the model was updated.} -#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.} -#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.} -#' \item{builds}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of the model build. -#' \item name string, The name of the model build. -#' \item createdAt string, The time the model build was created. -#' \item description string, A description of the model build. -#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types. -#' }} -#' \item{predictions}{array, An array containing the following fields: +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type to deploy to.} +#' \item{memory}{integer, The amount of memory allocated to the notebook.} +#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{mostRecentDeployment}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID of the model to which to apply the prediction. -#' \item tableName string, The qualified name of the table on which to apply the predictive model. -#' \item primaryKey array, The primary key or composite keys of the table being predicted. -#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted. -#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions. -#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted. +#' \item deploymentId integer, The ID for this deployment. +#' \item userId integer, The ID of the owner. +#' \item host string, Domain of the deployment. 
+#' \item name string, Name of the deployment. +#' \item dockerImageName string, The name of the docker image to pull from DockerHub. +#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). +#' \item displayUrl string, A signed URL for viewing the deployed item. +#' \item instanceType string, The EC2 instance type requested for the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. +#' \item state string, The state of the deployment. +#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. +#' \item createdAt string, +#' \item updatedAt string, +#' \item notebookId integer, The ID of owning Notebook #' }} -#' \item{lastOutputLocation}{string, The output JSON for the last build.} +#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} +#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} +#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{gitRepoId}{integer, The ID of the git repository.} +#' \item{gitRepoUrl}{string, The url of the git repository} +#' \item{gitRef}{string, The git reference if git repo is specified} +#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} +#' \item{hidden}{boolean, The hidden status of the item.} #' @export -models_list <- function(model_name = NULL, training_table_name = NULL, dependent_variable = NULL, author = NULL, status = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +notebooks_put_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/models/" - path_params <- list() - query_params <- list(model_name = model_name, training_table_name = training_table_name, dependent_variable = dependent_variable, author = author, status = status, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) - body_params <- list() + path <- "/notebooks/{id}/archive" + path_params <- list(id = id) + query_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Retrieve model configuration -#' @param id integer required. The ID of the model. +#' List the projects a Notebook belongs to +#' @param id integer required. The ID of the Notebook. +#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. 
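# Editorial sketch: archive a notebook that is no longer needed; calling the same
# endpoint with status = FALSE restores it. The notebook ID is a placeholder.
notebooks_put_archive(id = 123, status = TRUE)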
#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the model.} -#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} -#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} -#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} -#' \item{modelName}{string, The name of the model.} -#' \item{description}{string, A description of the model.} -#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} -#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. Valid only with continuous models.} -#' \item{modelTypeId}{integer, The ID of the model's type.} -#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} -#' \item{dependentVariable}{string, The dependent variable of the training dataset.} -#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} -#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} -#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} -#' \item{activeBuildId}{integer, The ID of the current active build, the build used to score predictions.} -#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} -#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
-#' }} -#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} -#' \item{runningAs}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -13095,18 +15144,9 @@ models_list <- function(model_name = NULL, training_table_name = NULL, dependent #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{timeZone}{string, The time zone of this model.} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{user}{list, A list containing the following elements: +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -13114,43 +15154,110 @@ models_list <- function(model_name = NULL, training_table_name = NULL, dependent #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{createdAt}{string, The time the model was created.} -#' \item{updatedAt}{string, The time the model was updated.} -#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.} -#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.} -#' \item{builds}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of the model build. -#' \item name string, The name of the model build. -#' \item createdAt string, The time the model build was created. -#' \item description string, A description of the model build. -#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types. -#' }} -#' \item{predictions}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of the model to which to apply the prediction. -#' \item tableName string, The qualified name of the table on which to apply the predictive model. -#' \item primaryKey array, The primary key or composite keys of the table being predicted. -#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted. -#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions. -#' \item schedule object, -#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted. 
-#' }} -#' \item{lastOutputLocation}{string, The output JSON for the last build.} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -models_get <- function(id) { +notebooks_list_projects <- function(id, hidden = NULL) { + + args <- as.list(match.call())[-1] + path <- "/notebooks/{id}/projects" + path_params <- list(id = id) + query_params <- list(hidden = hidden) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add a Notebook to a project +#' @param id integer required. The ID of the Notebook. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +notebooks_put_projects <- function(id, project_id) { + + args <- as.list(match.call())[-1] + path <- "/notebooks/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Remove a Notebook from a project +#' @param id integer required. The ID of the Notebook. +#' @param project_id integer required. The ID of the project. +#' +#' @return An empty HTTP response +#' @export +notebooks_delete_projects <- function(id, project_id) { args <- as.list(match.call())[-1] - path <- "/models/{id}" - path_params <- list(id = id) + path <- "/notebooks/{id}/projects/{project_id}" + path_params <- list(id = id, project_id = project_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List deployments for a Notebook +#' @param notebook_id integer required. The ID of the owning Notebook +#' @param deployment_id integer optional. The ID for this deployment +#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. 
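# Editorial sketch: file a notebook under a project and confirm the association.
# Notebook and project IDs are placeholders.
notebooks_put_projects(id = 123, project_id = 987)
notebooks_list_projects(id = 123)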
+#' +#' @return An array containing the following fields: +#' \item{deploymentId}{integer, The ID for this deployment.} +#' \item{userId}{integer, The ID of the owner.} +#' \item{host}{string, Domain of the deployment.} +#' \item{name}{string, Name of the deployment.} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} +#' \item{state}{string, The state of the deployment.} +#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{notebookId}{integer, The ID of owning Notebook} +#' @export +notebooks_list_deployments <- function(notebook_id, deployment_id = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/notebooks/{notebook_id}/deployments" + path_params <- list(notebook_id = notebook_id) + query_params <- list(deployment_id = deployment_id, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) @@ -13158,120 +15265,128 @@ models_get <- function(id) { } -#' Check status of a build -#' @param id integer required. The ID of the model. -#' @param build_id integer required. The ID of the build. +#' Deploy a Notebook +#' @param notebook_id integer required. The ID of the owning Notebook +#' @param deployment_id integer optional. The ID for this deployment #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the model build.} -#' \item{state}{string, The state of the model build.one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{error}{string, The error, if any, returned by the build.} -#' \item{name}{string, The name of the model build.} -#' \item{createdAt}{string, The time the model build was created.} -#' \item{description}{string, A description of the model build.} -#' \item{rootMeanSquaredError}{number, A key metric for continuous models. Nil for other model types.} -#' \item{rSquaredError}{number, A key metric for continuous models. Nil for other model types.} -#' \item{rocAuc}{number, A key metric for binary, multinomial, and ordinal models. Nil for other model types.} -#' \item{transformationMetadata}{string, A string representing the full JSON output of the metadata for transformation of column names} -#' \item{output}{string, A string representing the JSON output for the specified build. 
Only present when smaller than 10KB in size.} -#' \item{outputLocation}{string, A URL representing the location of the full JSON output for the specified build.The URL link will be valid for 5 minutes.} +#' \item{deploymentId}{integer, The ID for this deployment.} +#' \item{userId}{integer, The ID of the owner.} +#' \item{host}{string, Domain of the deployment.} +#' \item{name}{string, Name of the deployment.} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{displayUrl}{string, A signed URL for viewing the deployed item.} +#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} +#' \item{state}{string, The state of the deployment.} +#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{notebookId}{integer, The ID of owning Notebook} #' @export -models_get_builds <- function(id, build_id) { +notebooks_post_deployments <- function(notebook_id, deployment_id = NULL) { args <- as.list(match.call())[-1] - path <- "/models/{id}/builds/{build_id}" - path_params <- list(id = id, build_id = build_id) + path <- "/notebooks/{notebook_id}/deployments" + path_params <- list(notebook_id = notebook_id) query_params <- list() - body_params <- list() + body_params <- list(deploymentId = deployment_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Cancel a build -#' @param id integer required. The ID of the model. -#' @param build_id integer required. The ID of the build. +#' Get details about a Notebook deployment +#' @param notebook_id integer required. The ID of the owning Notebook +#' @param deployment_id integer required. 
The ID for this deployment #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{deploymentId}{integer, The ID for this deployment.} +#' \item{userId}{integer, The ID of the owner.} +#' \item{host}{string, Domain of the deployment.} +#' \item{name}{string, Name of the deployment.} +#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} +#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} +#' \item{displayUrl}{string, A signed URL for viewing the deployed item.} +#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} +#' \item{state}{string, The state of the deployment.} +#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{notebookId}{integer, The ID of owning Notebook} #' @export -models_delete_builds <- function(id, build_id) { +notebooks_get_deployments <- function(notebook_id, deployment_id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/builds/{build_id}" - path_params <- list(id = id, build_id = build_id) + path <- "/notebooks/{notebook_id}/deployments/{deployment_id}" + path_params <- list(notebook_id = notebook_id, deployment_id = deployment_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List builds for the given model -#' @param id integer required. The ID of the model. -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100. -#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' Delete a Notebook deployment +#' @param notebook_id integer required. The ID of the owning Notebook +#' @param deployment_id integer required. The ID for this deployment #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID of the model build.} -#' \item{state}{string, The state of the model build.one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} -#' \item{error}{string, The error, if any, returned by the build.} -#' \item{name}{string, The name of the model build.} -#' \item{createdAt}{string, The time the model build was created.} -#' \item{description}{string, A description of the model build.} -#' \item{rootMeanSquaredError}{number, A key metric for continuous models. Nil for other model types.} -#' \item{rSquaredError}{number, A key metric for continuous models. 
Nil for other model types.} -#' \item{rocAuc}{number, A key metric for binary, multinomial, and ordinal models. Nil for other model types.} -#' \item{transformationMetadata}{string, A string representing the full JSON output of the metadata for transformation of column names} -#' \item{output}{string, A string representing the JSON output for the specified build. Only present when smaller than 10KB in size.} -#' \item{outputLocation}{string, A URL representing the location of the full JSON output for the specified build.The URL link will be valid for 5 minutes.} +#' @return An empty HTTP response #' @export -models_list_builds <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +notebooks_delete_deployments <- function(notebook_id, deployment_id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/builds" - path_params <- list(id = id) - query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/notebooks/{notebook_id}/deployments/{deployment_id}" + path_params <- list(notebook_id = notebook_id, deployment_id = deployment_id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Get the logs for a build -#' @param id integer required. The ID of the model. -#' @param build_id integer required. The ID of the build. -#' @param last_id integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt. +#' Get the logs for a Notebook deployment +#' @param id integer required. The ID of the owning Notebook. +#' @param deployment_id integer required. The ID for this deployment. +#' @param start_at string optional. Log entries with a lower timestamp will be omitted. +#' @param end_at string optional. Log entries with a higher timestamp will be omitted. #' @param limit integer optional. The maximum number of log messages to return. Default of 10000. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of the log.} -#' \item{createdAt}{string, The time the log was created.} #' \item{message}{string, The log message.} -#' \item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} +#' \item{stream}{string, The stream of the log. One of "stdout", "stderr".} +#' \item{createdAt}{string, The time the log was created.} +#' \item{source}{string, The source of the log. 
One of "system", "user".} #' @export -models_list_builds_logs <- function(id, build_id, last_id = NULL, limit = NULL) { +notebooks_list_deployments_logs <- function(id, deployment_id, start_at = NULL, end_at = NULL, limit = NULL) { args <- as.list(match.call())[-1] - path <- "/models/{id}/builds/{build_id}/logs" - path_params <- list(id = id, build_id = build_id) - query_params <- list(last_id = last_id, limit = limit) + path <- "/notebooks/{id}/deployments/{deployment_id}/logs" + path_params <- list(id = id, deployment_id = deployment_id) + query_params <- list(start_at = start_at, end_at = end_at, limit = limit) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -13283,32 +15398,27 @@ models_list_builds_logs <- function(id, build_id, last_id = NULL, limit = NULL) } -#' List users and groups permissioned on this object -#' @param id integer required. The ID of the resource that is shared. +#' Get the git metadata attached to an item +#' @param id integer required. The ID of the file. #' -#' @return An array containing the following fields: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -models_list_shares <- function(id) { +notebooks_list_git <- function(id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/shares" + path <- "/notebooks/{id}/git" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -13322,39 +15432,36 @@ models_list_shares <- function(id) { } -#' Set the permissions users have on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_ids array required. An array of one or more user IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Attach an item to a file in a git repo +#' @param id integer required. The ID of the file. 
+#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: #' \itemize{ -#' \item users array, -#' \item groups array, +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, #' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -models_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +notebooks_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] - path <- "/models/{id}/shares/users" + path <- "/notebooks/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -13365,130 +15472,117 @@ models_put_shares_users <- function(id, user_ids, permission_level, share_email_ } -#' Revoke the permissions a user has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. 
Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -models_delete_shares_users <- function(id, user_id) { +notebooks_patch_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] - path <- "/models/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/notebooks/{id}/git" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Set the permissions groups has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' Get the git commits for an item on the current branch +#' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. 
For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' \item{commitHash}{string, The SHA of the commit.} +#' \item{authorName}{string, The name of the commit's author.} +#' \item{date}{string, The commit's timestamp.} +#' \item{message}{string, The commit message.} #' @export -models_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +notebooks_list_git_commits <- function(id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/shares/groups" + path <- "/notebooks/{id}/git/commits" path_params <- list(id = id) query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object -#' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' Commit and push a new version of the file +#' @param id integer required. The ID of the file. +#' @param content string required. The contents to commit to the file. +#' @param message string required. A commit message describing the changes being made. +#' @param file_hash string required. The full SHA of the file being replaced. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} #' @export -models_delete_shares_groups <- function(id, group_id) { +notebooks_post_git_commits <- function(id, content, message, file_hash) { args <- as.list(match.call())[-1] - path <- "/models/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) + path <- "/notebooks/{id}/git/commits" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(content = content, message = message, fileHash = file_hash) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' List the projects a Model belongs to -#' @param id integer required. The ID of the Model. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Get file contents at git ref +#' @param id integer required. The ID of the file. +#' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. 
-#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} #' @export -models_list_projects <- function(id, hidden = NULL) { +notebooks_get_git_commits <- function(id, commit_hash) { args <- as.list(match.call())[-1] - path <- "/models/{id}/projects" - path_params <- list(id = id) - query_params <- list(hidden = hidden) + path <- "/notebooks/{id}/git/commits/{commit_hash}" + path_params <- list(id = id, commit_hash = commit_hash) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -13500,187 +15594,97 @@ models_list_projects <- function(id, hidden = NULL) { } -#' Add a Model to a project -#' @param id integer required. The ID of the Model. -#' @param project_id integer required. The ID of the project. +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} #' @export -models_put_projects <- function(id, project_id) { +notebooks_post_git_checkout_latest <- function(id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/notebooks/{id}/git/checkout-latest" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) } -#' Remove a Model from a project -#' @param id integer required. The ID of the Model. -#' @param project_id integer required. The ID of the project. +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} #' @export -models_delete_projects <- function(id, project_id) { +notebooks_post_git_checkout <- function(id) { args <- as.list(match.call())[-1] - path <- "/models/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/notebooks/{id}/git/checkout" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("POST", path, path_params, query_params, body_params) return(resp) - } - - -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID of the model.} -#' \item{tableName}{string, The qualified name of the table containing the training set from which to build the model.} -#' \item{databaseId}{integer, The ID of the database holding the training set table used to build the model.} -#' \item{credentialId}{integer, The ID of the credential used to read the target table. Defaults to the user's default credential.} -#' \item{modelName}{string, The name of the model.} -#' \item{description}{string, A description of the model.} -#' \item{interactionTerms}{boolean, Whether to search for interaction terms.} -#' \item{boxCoxTransformation}{boolean, Whether to transform data so that it assumes a normal distribution. Valid only with continuous models.} -#' \item{modelTypeId}{integer, The ID of the model's type.} -#' \item{primaryKey}{string, The unique ID (primary key) of the training dataset.} -#' \item{dependentVariable}{string, The dependent variable of the training dataset.} -#' \item{dependentVariableOrder}{array, The order of dependent variables, especially useful for Ordinal Modeling.} -#' \item{excludedColumns}{array, A list of columns which will be considered ineligible to be independent variables.} -#' \item{limitingSQL}{string, A custom SQL WHERE clause used to filter the rows used to build the model. (e.g., "id > 105").} -#' \item{activeBuildId}{integer, The ID of the current active build, the build used to score predictions.} -#' \item{crossValidationParameters}{list, Cross validation parameter grid for tree methods, e.g. {"n_estimators": [100, 200, 500], "learning_rate": [0.01, 0.1], "max_depth": [2, 3]}.} -#' \item{numberOfFolds}{integer, Number of folds for cross validation. Default value is 5.} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." 
-#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{parentId}{integer, The ID of the parent job that will trigger this model.} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{timeZone}{string, The time zone of this model.} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{createdAt}{string, The time the model was created.} -#' \item{updatedAt}{string, The time the model was updated.} -#' \item{currentBuildState}{string, The status of the current model build. One of "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.} -#' \item{currentBuildException}{string, Exception message, if applicable, of the current model build.} -#' \item{builds}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of the model build. -#' \item name string, The name of the model build. -#' \item createdAt string, The time the model build was created. -#' \item description string, A description of the model build. -#' \item rootMeanSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rSquaredError number, A key metric for continuous models. Nil for other model types. -#' \item rocAuc number, A key metric for binary, multinomial, and ordinal models. Nil for other model types. -#' }} -#' \item{predictions}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID of the model to which to apply the prediction. -#' \item tableName string, The qualified name of the table on which to apply the predictive model. -#' \item primaryKey array, The primary key or composite keys of the table being predicted. 
-#' \item limitingSQL string, A SQL WHERE clause used to scope the rows to be predicted.
-#' \item outputTable string, The qualified name of the table to be created which will contain the model's predictions.
-#' \item schedule object, 
-#' \item state string, The status of the prediction. One of: "succeeded", "failed", "queued", or "running,"or "idle", if no build has been attempted.
-#' }}
-#' \item{lastOutputLocation}{string, The output JSON for the last build.}
-#' \item{archived}{string, The archival status of the requested item(s).}
+ }
+
+
+#' Receive a stream of notifications as they come in
+#' @param last_event_id string optional. Allows the browser to keep track of the last event fired.
+#' @param r string optional. Specifies the retry/reconnect timeout.
+#' @param mock string optional. Used for testing.
+#'
+#' @return An empty HTTP response
 #' @export
-models_put_archive <- function(id, status) {
+notifications_list <- function(last_event_id = NULL, r = NULL, mock = NULL) {
 
   args <- as.list(match.call())[-1]
-  path <- "/models/{id}/archive"
-  path_params <- list(id = id)
-  query_params <- list()
-  body_params <- list(status = status)
+  path <- "/notifications/"
+  path_params <- list()
+  query_params <- list(last_event_id = last_event_id, r = r, mock = mock)
+  body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
   body_params <- body_params[match_params(body_params, args)]
-  resp <- call_api("PUT", path, path_params, query_params, body_params)
+  resp <- call_api("GET", path, path_params, query_params, body_params)
 
   return(resp)
 
  }
 
 
-#' Show the model build schedule
-#' @param id integer required. The ID of the model associated with this schedule.
+#' List the ontology of column names Civis uses
+#' @param subset string optional. A subset of fields to return.
 #'
-#' @return A list containing the following elements:
-#' \item{id}{integer, The ID of the model associated with this schedule.}
-#' \item{schedule}{list, A list containing the following elements:
-#' \itemize{
-#' \item scheduled boolean, If the item is scheduled.
-#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday.
-#' \item scheduledHours array, Hours of the day it is scheduled on.
-#' \item scheduledMinutes array, Minutes of the day it is scheduled on.
-#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour.
-#' }}
+#' @return An array containing the following fields:
+#' \item{key}{string, }
+#' \item{title}{string, }
+#' \item{desc}{string, A description of this field.}
+#' \item{aliases}{array, }
 #' @export
-models_list_schedules <- function(id) {
+ontology_list <- function(subset = NULL) {
 
   args <- as.list(match.call())[-1]
-  path <- "/models/{id}/schedules"
-  path_params <- list(id = id)
-  query_params <- list()
+  path <- "/ontology/"
+  path_params <- list()
+  query_params <- list(subset = subset)
   body_params <- list()
   path_params <- path_params[match_params(path_params, args)]
   query_params <- query_params[match_params(query_params, args)]
@@ -13692,22 +15696,19 @@ models_list_schedules <- function(id) {
 
  }
 
 
-#' List Notebooks
-#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.
+#' List Permission Sets
 #' @param archived string optional. The archival status of the requested item(s).
-#' @param author string optional. If specified, return imports from this author. 
It accepts a comma-separated list of author IDs. -#' @param status string optional. If specified, returns notebooks with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'pending', 'idle'. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. #' @param order string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -13717,32 +15718,14 @@ models_list_schedules <- function(id) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. 
-#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} #' \item{archived}{string, The archival status of the requested item(s).} #' @export -notebooks_list <- function(hidden = NULL, archived = NULL, author = NULL, status = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +permission_sets_list <- function(archived = NULL, author = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/" + path <- "/permission_sets/" path_params <- list() - query_params <- list(hidden = hidden, archived = archived, author = author, status = status, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(archived = archived, author = author, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -13754,37 +15737,15 @@ notebooks_list <- function(hidden = NULL, archived = NULL, author = NULL, status } -#' Create a Notebook -#' @param name string optional. The name of this notebook. -#' @param language string optional. The kernel language of this notebook. -#' @param description string optional. The description of this notebook. -#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. -#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. -#' @param requirements string optional. The requirements txt file. -#' @param docker_image_name string optional. The name of the docker image to pull from DockerHub. -#' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub (default: latest). -#' @param instance_type string optional. The EC2 instance type to deploy to. -#' @param memory integer optional. The amount of memory allocated to the notebook. -#' @param cpu integer optional. The amount of cpu allocated to the the notebook. -#' @param credentials array optional. A list of credential IDs to pass to the notebook. -#' @param environment_variables list optional. Environment variables to be passed into the Notebook. -#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. -#' @param git_repo_url string optional. The url of the git repository -#' @param git_ref string optional. The git reference if git repo is specified -#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch -#' @param hidden boolean optional. The hidden status of the item. +#' Create a Permission Set +#' @param name string required. The name of this permission set. +#' @param description string optional. A description of this permission set. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -13792,49 +15753,17 @@ notebooks_list <- function(hidden = NULL, archived = NULL, author = NULL, status #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. 
-#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} #' @export -notebooks_post <- function(name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL, hidden = NULL) { +permission_sets_post <- function(name, description = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/" + path <- "/permission_sets/" path_params <- list() query_params <- list() - body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path, hidden = hidden) + body_params <- list(name = name, description = description) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -13845,20 +15774,14 @@ notebooks_post <- function(name = NULL, language = NULL, description = NULL, fil } -#' Get a Notebook +#' Get a Permission Set #' @param id integer required. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. 
#' \item name string, This user's name. @@ -13866,46 +15789,14 @@ notebooks_post <- function(name = NULL, language = NULL, description = NULL, fil #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} #' @export -notebooks_get <- function(id) { +permission_sets_get <- function(id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}" + path <- "/permission_sets/{id}" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -13919,37 +15810,16 @@ notebooks_get <- function(id) { } -#' Replace all attributes of this Notebook -#' @param id integer required. The ID for this notebook. -#' @param name string optional. The name of this notebook. -#' @param language string optional. The kernel language of this notebook. -#' @param description string optional. The description of this notebook. -#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. -#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. -#' @param requirements string optional. The requirements txt file. -#' @param docker_image_name string optional. The name of the docker image to pull from DockerHub. -#' @param docker_image_tag string optional. 
The tag of the docker image to pull from DockerHub (default: latest). -#' @param instance_type string optional. The EC2 instance type to deploy to. -#' @param memory integer optional. The amount of memory allocated to the notebook. -#' @param cpu integer optional. The amount of cpu allocated to the the notebook. -#' @param credentials array optional. A list of credential IDs to pass to the notebook. -#' @param environment_variables list optional. Environment variables to be passed into the Notebook. -#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. -#' @param git_repo_url string optional. The url of the git repository -#' @param git_ref string optional. The git reference if git repo is specified -#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch +#' Replace all attributes of this Permission Set +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this permission set. +#' @param description string optional. A description of this permission set. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -13957,49 +15827,17 @@ notebooks_get <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. 
-#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} #' @export -notebooks_put <- function(id, name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL) { +permission_sets_put <- function(id, name, description = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}" + path <- "/permission_sets/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path) + body_params <- list(name = name, description = description) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -14010,37 +15848,16 @@ notebooks_put <- function(id, name = NULL, language = NULL, description = NULL, } -#' Update some attributes of this Notebook -#' @param id integer required. The ID for this notebook. -#' @param name string optional. The name of this notebook. -#' @param language string optional. The kernel language of this notebook. -#' @param description string optional. The description of this notebook. -#' @param file_id string optional. The file ID for the S3 file containing the .ipynb file. -#' @param requirements_file_id string optional. The file ID for the S3 file containing the requirements.txt file. -#' @param requirements string optional. The requirements txt file. -#' @param docker_image_name string optional. The name of the docker image to pull from DockerHub. -#' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub (default: latest). -#' @param instance_type string optional. The EC2 instance type to deploy to. -#' @param memory integer optional. 
The amount of memory allocated to the notebook. -#' @param cpu integer optional. The amount of cpu allocated to the the notebook. -#' @param credentials array optional. A list of credential IDs to pass to the notebook. -#' @param environment_variables list optional. Environment variables to be passed into the Notebook. -#' @param idle_timeout integer optional. How long the notebook will stay alive without any kernel activity. -#' @param git_repo_url string optional. The url of the git repository -#' @param git_ref string optional. The git reference if git repo is specified -#' @param git_path string optional. The path to the .ipynb file in the git repo that will be started up on notebook launch +#' Update some attributes of this Permission Set +#' @param id integer required. The ID for this permission set. +#' @param name string optional. The name of this permission set. +#' @param description string optional. A description of this permission set. #' #' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. #' \item name string, This user's name. @@ -14048,49 +15865,17 @@ notebooks_put <- function(id, name = NULL, language = NULL, description = NULL, #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. 
-#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} #' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} #' @export -notebooks_patch <- function(id, name = NULL, language = NULL, description = NULL, file_id = NULL, requirements_file_id = NULL, requirements = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, environment_variables = NULL, idle_timeout = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL) { +permission_sets_patch <- function(id, name = NULL, description = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}" + path <- "/permission_sets/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, language = language, description = description, fileId = file_id, requirementsFileId = requirements_file_id, requirements = requirements, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, environmentVariables = environment_variables, idleTimeout = idle_timeout, gitRepoUrl = git_repo_url, gitRef = git_ref, gitPath = git_path) + body_params <- list(name = name, description = description) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -14101,126 +15886,6 @@ notebooks_patch <- function(id, name = NULL, language = NULL, description = NULL } -#' Archive a Notebook (deprecated, use archiving endpoints instead) -#' @param id integer required. -#' -#' @return An empty HTTP response -#' @export -notebooks_delete <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/notebooks/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Get URLs to update notebook -#' @param id integer required. 
-#' -#' @return A list containing the following elements: -#' \item{updateUrl}{string, Time-limited URL to PUT new contents of the .ipynb file for this notebook.} -#' \item{updatePreviewUrl}{string, Time-limited URL to PUT new contents of the .htm preview file for this notebook.} -#' @export -notebooks_list_update_links <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/update-links" - path_params <- list(id = id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Clone this Notebook -#' @param id integer required. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. 
-#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} -#' @export -notebooks_post_clone <- function(id) { - - args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/clone" - path_params <- list(id = id) - query_params <- list() - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) - - return(resp) - - } - - #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -14243,10 +15908,10 @@ notebooks_post_clone <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -notebooks_list_shares <- function(id) { +permission_sets_list_shares <- function(id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/shares" + path <- "/permission_sets/{id}/shares" path_params <- list(id = id) query_params <- list() body_params <- list() @@ -14286,10 +15951,10 @@ notebooks_list_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -notebooks_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +permission_sets_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/shares/users" + path <- "/permission_sets/{id}/shares/users" path_params <- list(id = id) query_params <- list() body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -14309,10 +15974,10 @@ notebooks_put_shares_users <- function(id, user_ids, permission_level, share_ema #' #' @return An empty HTTP response #' @export -notebooks_delete_shares_users <- function(id, user_id) { +permission_sets_delete_shares_users <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/shares/users/{user_id}" + path <- "/permission_sets/{id}/shares/users/{user_id}" path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() @@ -14352,10 +16017,10 @@ notebooks_delete_shares_users <- function(id, user_id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -notebooks_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +permission_sets_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/shares/groups" + path <- "/permission_sets/{id}/shares/groups" path_params <- list(id = id) query_params <- list() body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) @@ -14375,10 +16040,10 @@ notebooks_put_shares_groups <- function(id, group_ids, permission_level, share_e #' #' @return An empty HTTP response #' @export -notebooks_delete_shares_groups <- function(id, group_id) { +permission_sets_delete_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/shares/groups/{group_id}" + path <- "/permission_sets/{id}/shares/groups/{group_id}" path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() @@ -14392,140 +16057,97 @@ notebooks_delete_shares_groups <- function(id, group_id) { } -#' Update the archive status of this object -#' @param id integer required. The ID of the object. -#' @param status boolean required. The desired archived status of the object. +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. 
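A minimal sharing sketch for the permission-set share endpoints above, assuming an authenticated Civis session; all IDs are placeholders.

library(civis)
# Grant read access to a user and write access to a group on the permission set.
permission_sets_put_shares_users(id = 123, user_ids = c(456),
                                 permission_level = "read",
                                 send_shared_email = FALSE)
permission_sets_put_shares_groups(id = 123, group_ids = c(789),
                                  permission_level = "write")
# Revoke those grants again.
permission_sets_delete_shares_users(id = 123, user_id = 456)
permission_sets_delete_shares_groups(id = 123, group_id = 789)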
ID of target user #' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for this notebook.} -#' \item{name}{string, The name of this notebook.} -#' \item{language}{string, The kernel language of this notebook.} -#' \item{description}{string, The description of this notebook.} -#' \item{notebookUrl}{string, Time-limited URL to get the .ipynb file for this notebook.} -#' \item{notebookPreviewUrl}{string, Time-limited URL to get the .htm preview file for this notebook.} -#' \item{requirementsUrl}{string, Time-limited URL to get the requirements.txt file for this notebook.} -#' \item{fileId}{string, The file ID for the S3 file containing the .ipynb file.} -#' \item{requirementsFileId}{string, The file ID for the S3 file containing the requirements.txt file.} -#' \item{user}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type to deploy to.} -#' \item{memory}{integer, The amount of memory allocated to the notebook.} -#' \item{cpu}{integer, The amount of cpu allocated to the the notebook.} -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{mostRecentDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. 
-#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item notebookId integer, The ID of owning Notebook -#' }} -#' \item{credentials}{array, A list of credential IDs to pass to the notebook.} -#' \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} -#' \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} -#' \item{gitRepoId}{integer, The ID of the git repository.} -#' \item{gitRepoUrl}{string, The url of the git repository} -#' \item{gitRef}{string, The git reference if git repo is specified} -#' \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{hidden}{boolean, The hidden status of the item.} +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -notebooks_put_archive <- function(id, status) { +permission_sets_list_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/archive" + path <- "/permission_sets/{id}/dependencies" path_params <- list(id = id) - query_params <- list() - body_params <- list(status = status) + query_params <- list(user_id = user_id) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List the projects a Notebook belongs to -#' @param id integer required. The ID of the Notebook. -#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? #' -#' @return An array containing the following fields: -#' \item{id}{integer, The ID for this project.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. 
-#' }} -#' \item{name}{string, The name of this project.} -#' \item{description}{string, A description of the project.} -#' \item{users}{array, An array containing the following fields: +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: #' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user #' }} -#' \item{autoShare}{boolean, } -#' \item{createdAt}{string, } -#' \item{updatedAt}{string, } -#' \item{archived}{string, The archival status of the requested item(s).} #' @export -notebooks_list_projects <- function(id, hidden = NULL) { +permission_sets_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/projects" + path <- "/permission_sets/{id}/transfer" path_params <- list(id = id) - query_params <- list(hidden = hidden) - body_params <- list() + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Add a Notebook to a project -#' @param id integer required. The ID of the Notebook. -#' @param project_id integer required. The ID of the project. +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. #' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this permission set.} +#' \item{name}{string, The name of this permission set.} +#' \item{description}{string, A description of this permission set.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
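A sketch of checking dependent objects before transferring ownership of a permission set, assuming an authenticated session; the IDs are placeholders.

library(civis)
# See which dependent objects the target user would need access to.
deps <- permission_sets_list_dependencies(id = 123, user_id = 456)
# Transfer ownership and grant manage permission on all dependencies.
permission_sets_put_transfer(id = 123, user_id = 456,
                             include_dependencies = TRUE,
                             send_email = FALSE)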
+#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{archived}{string, The archival status of the requested item(s).} #' @export -notebooks_put_projects <- function(id, project_id) { +permission_sets_put_archive <- function(id, status) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/permission_sets/{id}/archive" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(status = status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -14536,60 +16158,53 @@ notebooks_put_projects <- function(id, project_id) { } -#' Remove a Notebook from a project -#' @param id integer required. The ID of the Notebook. -#' @param project_id integer required. The ID of the project. +#' Get all permissions for a user, in this permission set +#' @param id integer required. The ID for this permission set. +#' @param user_id integer required. The ID for the user. #' -#' @return An empty HTTP response +#' @return An array containing the following fields: +#' \item{resourceName}{string, The name of the resource.} +#' \item{read}{boolean, If true, the user has read permission on this resource.} +#' \item{write}{boolean, If true, the user has write permission on this resource.} +#' \item{manage}{boolean, If true, the user has manage permission on this resource.} #' @export -notebooks_delete_projects <- function(id, project_id) { +permission_sets_list_users_permissions <- function(id, user_id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/projects/{project_id}" - path_params <- list(id = id, project_id = project_id) + path <- "/permission_sets/{id}/users/{user_id}/permissions" + path_params <- list(id = id, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' List deployments for a Notebook -#' @param notebook_id integer required. The ID of the owning Notebook -#' @param deployment_id integer optional. The ID for this deployment -#' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. +#' List resources in a permission set +#' @param id integer required. The ID for this permission set. +#' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at. -#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name, id, updated_at, created_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. 
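A sketch combining the archive, per-user permission, and resource-listing endpoints documented above; the IDs, limit, and ordering values are placeholders chosen within the documented ranges, and an authenticated session is assumed.

library(civis)
# Inspect what a single user may do within the permission set.
user_perms <- permission_sets_list_users_permissions(id = 123, user_id = 456)
# Page through the resources in the set, ordered by name.
resources <- permission_sets_list_resources(id = 123, limit = 100,
                                            order = "name", order_dir = "asc")
# Archive the permission set once it is no longer needed.
permission_sets_put_archive(id = 123, status = TRUE)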
#' #' @return An array containing the following fields: -#' \item{deploymentId}{integer, The ID for this deployment.} -#' \item{userId}{integer, The ID of the owner.} -#' \item{host}{string, Domain of the deployment.} -#' \item{name}{string, Name of the deployment.} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} -#' \item{state}{string, The state of the deployment.} -#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +#' \item{name}{string, The name of this resource.} +#' \item{description}{string, A description of this resource.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } -#' \item{notebookId}{integer, The ID of owning Notebook} #' @export -notebooks_list_deployments <- function(notebook_id, deployment_id = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +permission_sets_list_resources <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{notebook_id}/deployments" - path_params <- list(notebook_id = notebook_id) - query_params <- list(deployment_id = deployment_id, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + path <- "/permission_sets/{id}/resources" + path_params <- list(id = id) + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -14601,36 +16216,25 @@ notebooks_list_deployments <- function(notebook_id, deployment_id = NULL, limit } -#' Deploy a Notebook -#' @param notebook_id integer required. The ID of the owning Notebook -#' @param deployment_id integer optional. The ID for this deployment -#' @param published boolean optional. +#' Create a resource in a permission set +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param description string optional. A description of this resource. 
#' #' @return A list containing the following elements: -#' \item{deploymentId}{integer, The ID for this deployment.} -#' \item{userId}{integer, The ID of the owner.} -#' \item{host}{string, Domain of the deployment.} -#' \item{name}{string, Name of the deployment.} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{displayUrl}{string, A signed URL for viewing the deployed item.} -#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} -#' \item{state}{string, The state of the deployment.} -#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +#' \item{name}{string, The name of this resource.} +#' \item{description}{string, A description of this resource.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } -#' \item{notebookId}{integer, The ID of owning Notebook} #' @export -notebooks_post_deployments <- function(notebook_id, deployment_id = NULL, published = NULL) { +permission_sets_post_resources <- function(id, name, description = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{notebook_id}/deployments" - path_params <- list(notebook_id = notebook_id) + path <- "/permission_sets/{id}/resources" + path_params <- list(id = id) query_params <- list() - body_params <- list(deploymentId = deployment_id, published = published) + body_params <- list(name = name, description = description) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -14641,33 +16245,22 @@ notebooks_post_deployments <- function(notebook_id, deployment_id = NULL, publis } -#' Get details about a Notebook deployment -#' @param notebook_id integer required. The ID of the owning Notebook -#' @param deployment_id integer required. The ID for this deployment +#' Get a resource in a permission set +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. 
#' #' @return A list containing the following elements: -#' \item{deploymentId}{integer, The ID for this deployment.} -#' \item{userId}{integer, The ID of the owner.} -#' \item{host}{string, Domain of the deployment.} -#' \item{name}{string, Name of the deployment.} -#' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} -#' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} -#' \item{displayUrl}{string, A signed URL for viewing the deployed item.} -#' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} -#' \item{state}{string, The state of the deployment.} -#' \item{stateMessage}{string, A detailed description of the state.} +#' \item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +#' \item{name}{string, The name of this resource.} +#' \item{description}{string, A description of this resource.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } -#' \item{notebookId}{integer, The ID of owning Notebook} #' @export -notebooks_get_deployments <- function(notebook_id, deployment_id) { +permission_sets_get_resources <- function(id, name) { args <- as.list(match.call())[-1] - path <- "/notebooks/{notebook_id}/deployments/{deployment_id}" - path_params <- list(notebook_id = notebook_id, deployment_id = deployment_id) + path <- "/permission_sets/{id}/resources/{name}" + path_params <- list(id = id, name = name) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -14680,80 +16273,86 @@ notebooks_get_deployments <- function(notebook_id, deployment_id) { } -#' Delete a Notebook deployment -#' @param notebook_id integer required. The ID of the owning Notebook -#' @param deployment_id integer required. The ID for this deployment +#' Update a resource in a permission set +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param description string optional. A description of this resource. 
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +#' \item{name}{string, The name of this resource.} +#' \item{description}{string, A description of this resource.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export -notebooks_delete_deployments <- function(notebook_id, deployment_id) { +permission_sets_patch_resources <- function(id, name, description = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{notebook_id}/deployments/{deployment_id}" - path_params <- list(notebook_id = notebook_id, deployment_id = deployment_id) + path <- "/permission_sets/{id}/resources/{name}" + path_params <- list(id = id, name = name) query_params <- list() - body_params <- list() + body_params <- list(description = description) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PATCH", path, path_params, query_params, body_params) return(resp) } -#' Get the logs for a Notebook deployment -#' @param id integer required. The ID of the owning Notebook. -#' @param deployment_id integer required. The ID for this deployment. -#' @param start_at string optional. Log entries with a lower timestamp will be omitted. -#' @param end_at string optional. Log entries with a higher timestamp will be omitted. -#' @param limit integer optional. The maximum number of log messages to return. Default of 10000. +#' Delete a resource in a permission set +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. #' -#' @return An array containing the following fields: -#' \item{message}{string, The log message.} -#' \item{stream}{string, The stream of the log. One of "stdout", "stderr".} -#' \item{createdAt}{string, The time the log was created.} -#' \item{source}{string, The source of the log. One of "system", "user".} +#' @return An empty HTTP response #' @export -notebooks_list_deployments_logs <- function(id, deployment_id, start_at = NULL, end_at = NULL, limit = NULL) { +permission_sets_delete_resources <- function(id, name) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/deployments/{deployment_id}/logs" - path_params <- list(id = id, deployment_id = deployment_id) - query_params <- list(start_at = start_at, end_at = end_at, limit = limit) + path <- "/permission_sets/{id}/resources/{name}" + path_params <- list(id = id, name = name) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Get the git metadata attached to an item -#' @param id integer required. The ID of the file. +#' List users and groups permissioned on this object +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. #' -#' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
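A minimal lifecycle sketch for resources inside a permission set, assuming an authenticated session; the permission set ID and the resource name "reports" are placeholders.

library(civis)
# Create, fetch, update, and finally delete a named resource in the set.
permission_sets_post_resources(id = 123, name = "reports",
                               description = "Access to report objects")
res <- permission_sets_get_resources(id = 123, name = "reports")
permission_sets_patch_resources(id = 123, name = "reports",
                                description = "Read/write access to reports")
permission_sets_delete_resources(id = 123, name = "reports")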
Can be a branch name, or the full or shortened SHA of a commit.} -#' \item{gitBranch}{string, The git branch that the file is on.} -#' \item{gitPath}{string, The path of the file in the repository.} -#' \item{gitRepo}{list, A list containing the following elements: +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID for this git repository. -#' \item repoUrl string, The URL for this git repository. -#' \item createdAt string, -#' \item updatedAt string, +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -notebooks_list_git <- function(id) { +permission_sets_list_resources_shares <- function(id, name) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/git" - path_params <- list(id = id) + path <- "/permission_sets/{id}/resources/{name}/shares" + path_params <- list(id = id, name = name) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -14766,34 +16365,40 @@ notebooks_list_git <- function(id) { } -#' Attach an item to a file in a git repo -#' @param id integer required. The ID of the file. -#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. -#' @param git_branch string optional. The git branch that the file is on. -#' @param git_path string optional. The path of the file in the repository. -#' @param git_repo_url string optional. The URL of the git repository. -#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' Set the permissions users have on this object +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} -#' \item{gitBranch}{string, The git branch that the file is on.} -#' \item{gitPath}{string, The path of the file in the repository.} -#' \item{gitRepo}{list, A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, The ID for this git repository. -#' \item repoUrl string, The URL for this git repository. 
-#' \item createdAt string, -#' \item updatedAt string, +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -notebooks_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +permission_sets_put_resources_shares_users <- function(id, name, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/git" - path_params <- list(id = id) + path <- "/permission_sets/{id}/resources/{name}/shares/users" + path_params <- list(id = id, name = name) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -14804,132 +16409,92 @@ notebooks_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = } -#' Get the git commits for an item -#' @param id integer required. The ID of the file. +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param user_id integer required. The ID of the user. #' -#' @return A list containing the following elements: -#' \item{commitHash}{string, The SHA of the commit.} -#' \item{authorName}{string, The name of the commit's author.} -#' \item{date}{string, The commit's timestamp.} -#' \item{message}{string, The commit message.} +#' @return An empty HTTP response #' @export -notebooks_list_git_commits <- function(id) { +permission_sets_delete_resources_shares_users <- function(id, name, user_id) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/git/commits" - path_params <- list(id = id) + path <- "/permission_sets/{id}/resources/{name}/shares/users/{user_id}" + path_params <- list(id = id, name = name, user_id = user_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Commit and push a new version of the file -#' @param id integer required. The ID of the file. -#' @param content string required. The contents to commit to the file. -#' @param message string required. A commit message describing the changes being made. -#' @param file_hash string required. The full SHA of the file being replaced. 
-#' -#' @return A list containing the following elements: -#' \item{content}{string, The file's contents.} -#' \item{type}{string, The file's type.} -#' \item{size}{integer, The file's size.} -#' \item{fileHash}{string, The SHA of the file.} -#' @export -notebooks_post_git_commits <- function(id, content, message, file_hash) { - - args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/git/commits" - path_params <- list(id = id) - query_params <- list() - body_params <- list(content = content, message = message, fileHash = file_hash) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Get file contents at commit_hash -#' @param id integer required. The ID of the file. -#' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. +#' Set the permissions groups has on this object +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. #' #' @return A list containing the following elements: -#' \item{content}{string, The file's contents.} -#' \item{type}{string, The file's type.} -#' \item{size}{integer, The file's size.} -#' \item{fileHash}{string, The SHA of the file.} +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} #' @export -notebooks_get_git_commits <- function(id, commit_hash) { +permission_sets_put_resources_shares_groups <- function(id, name, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/notebooks/{id}/git/commits/{commit_hash}" - path_params <- list(id = id, commit_hash = commit_hash) + path <- "/permission_sets/{id}/resources/{name}/shares/groups" + path_params <- list(id = id, name = name) query_params <- list() - body_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) } -#' Receive a stream of notifications as they come in -#' @param last_event_id string optional. allows browser to keep track of last event fired -#' @param r string optional. specifies retry/reconnect timeout -#' @param mock string optional. used for testing +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID for this permission set. +#' @param name string required. The name of this resource. +#' @param group_id integer required. The ID of the group. #' #' @return An empty HTTP response #' @export -notifications_list <- function(last_event_id = NULL, r = NULL, mock = NULL) { - - args <- as.list(match.call())[-1] - path <- "/notifications/" - path_params <- list() - query_params <- list(last_event_id = last_event_id, r = r, mock = mock) - body_params <- list() - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' List the ontology of column names Civis uses -#' @param subset string optional. A subset of fields to return. -#' -#' @return An array containing the following fields: -#' \item{key}{string, } -#' \item{title}{string, } -#' \item{desc}{string, A description of this field.} -#' \item{aliases}{array, } -#' @export -ontology_list <- function(subset = NULL) { +permission_sets_delete_resources_shares_groups <- function(id, name, group_id) { args <- as.list(match.call())[-1] - path <- "/ontology/" - path_params <- list() - query_params <- list(subset = subset) + path <- "/permission_sets/{id}/resources/{name}/shares/groups/{group_id}" + path_params <- list(id = id, name = name, group_id = group_id) + query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) @@ -15009,10 +16574,11 @@ predictions_list <- function(model_id = NULL) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. 
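A sketch of resource-level sharing within a permission set; the IDs and resource name are placeholders and an authenticated session is assumed.

library(civis)
# Give one user read access to a single named resource in the set.
permission_sets_put_resources_shares_users(id = 123, name = "reports",
                                           user_ids = c(456),
                                           permission_level = "read")
# Remove a group's access to that resource.
permission_sets_delete_resources_shares_groups(id = 123, name = "reports",
                                               group_id = 789)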
-#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{limitingSQL}{string, A SQL WHERE clause used to scope the rows to be predicted.} #' \item{primaryKey}{array, The primary key or composite keys of the table being predicted.} @@ -15042,10 +16608,11 @@ predictions_get <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{scoreOnModelBuild}{boolean, Whether the prediction will run after a rebuild of the associated model.} #' @export @@ -15067,8 +16634,9 @@ predictions_list_schedules <- function(id) { #' List projects -#' @param author string optional. If specified, return projects owned by this author. It accepts a comma-separated list of author ids. #' @param permission string optional. A permissions string, one of "read", "write", or "manage". Lists only projects for which the current user has that permission. +#' @param auto_share boolean optional. Used to filter projects based on whether the project is autoshare enabled or not. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 1000. 
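A sketch of the two mutually exclusive scheduling styles described by the updated schedule fields; the list structure is assumed from the field documentation and is illustrative only.

# Weekly: run on Monday, Wednesday, and Friday at 06:00 (0 = Sunday).
weekly_schedule <- list(scheduled = TRUE,
                        scheduledDays = c(1, 3, 5),
                        scheduledHours = c(6),
                        scheduledMinutes = c(0))
# Monthly: run on the 1st and 15th; scheduledDaysOfMonth cannot be combined
# with scheduledDays.
monthly_schedule <- list(scheduled = TRUE,
                         scheduledDaysOfMonth = c(1, 15),
                         scheduledHours = c(6),
                         scheduledMinutes = c(0))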
@@ -15101,12 +16669,12 @@ predictions_list_schedules <- function(id) { #' \item{updatedAt}{string, } #' \item{archived}{string, The archival status of the requested item(s).} #' @export -projects_list <- function(author = NULL, permission = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +projects_list <- function(permission = NULL, auto_share = NULL, author = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/projects/" path_params <- list() - query_params <- list(author = author, permission = permission, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(permission = permission, auto_share = auto_share, author = author, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -15122,6 +16690,7 @@ projects_list <- function(author = NULL, permission = NULL, hidden = NULL, archi #' @param name string required. The name of this project. #' @param description string required. A description of the project. #' @param note string optional. Notes for the project. +#' @param auto_share boolean optional. If true, objects within the project will be automatically shared when the project is shared or objects are added. #' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: @@ -15262,14 +16831,6 @@ projects_list <- function(author = NULL, permission = NULL, hidden = NULL, archi #' \item name string, #' \item lastRun object, #' }} -#' \item{appInstances}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The item's ID. -#' \item createdAt string, -#' \item updatedAt string, -#' \item name string, -#' \item slug string, -#' }} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The item's ID. @@ -15289,10 +16850,13 @@ projects_list <- function(author = NULL, permission = NULL, hidden = NULL, archi #' \item icon string, #' \item author string, #' \item updatedAt string, +#' \item autoShare boolean, #' \item archived string, The archival status of the requested item(s). #' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". #' }} #' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{parentProject}{list, A list containing the following elements: @@ -15300,14 +16864,15 @@ projects_list <- function(author = NULL, permission = NULL, hidden = NULL, archi #' \item id integer, The parent project's ID. #' \item name integer, The parent project's name. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
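A short sketch of the updated projects_list() filters, assuming an authenticated session; the author IDs are placeholders.

library(civis)
# List writable, auto-share-enabled projects owned by either of two users.
my_projects <- projects_list(permission = "write",
                             auto_share = TRUE,
                             author = "456,789")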
One of "read", "write", or "manage".} #' @export -projects_post <- function(name, description, note = NULL, hidden = NULL) { +projects_post <- function(name, description, note = NULL, auto_share = NULL, hidden = NULL) { args <- as.list(match.call())[-1] path <- "/projects/" path_params <- list() query_params <- list() - body_params <- list(name = name, description = description, note = note, hidden = hidden) + body_params <- list(name = name, description = description, note = note, autoShare = auto_share, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -15318,8 +16883,10 @@ projects_post <- function(name, description, note = NULL, hidden = NULL) { } -#' Get a detailed view of a project and the objects in it -#' @param project_id integer required. +#' Clone this +#' @param id integer required. The ID for this project. +#' @param clone_schedule boolean optional. If true, also copy the schedule for all applicable project objects. +#' @param clone_notifications boolean optional. If true, also copy the notifications for all applicable project objects. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for this project.} @@ -15459,13 +17026,198 @@ projects_post <- function(name, description, note = NULL, hidden = NULL) { #' \item name string, #' \item lastRun object, #' }} -#' \item{appInstances}{array, An array containing the following fields: +#' \item{projects}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item description string, +#' }} +#' \item{allObjects}{array, An array containing the following fields: +#' \itemize{ +#' \item projectId integer, +#' \item objectId integer, +#' \item objectType string, +#' \item fcoType string, +#' \item subType string, +#' \item name string, +#' \item icon string, +#' \item author string, +#' \item updatedAt string, +#' \item autoShare boolean, +#' \item archived string, The archival status of the requested item(s). +#' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". +#' }} +#' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{parentProject}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The parent project's ID. +#' \item name integer, The parent project's name. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +#' @export +projects_post_clone <- function(id, clone_schedule = NULL, clone_notifications = NULL) { + + args <- as.list(match.call())[-1] + path <- "/projects/{id}/clone" + path_params <- list(id = id) + query_params <- list() + body_params <- list(cloneSchedule = clone_schedule, cloneNotifications = clone_notifications) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a detailed view of a project and the objects in it +#' @param project_id integer required. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{tables}{array, An array containing the following fields: +#' \itemize{ +#' \item schema string, +#' \item name string, +#' \item rowCount integer, +#' \item columnCount integer, +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{surveys}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{scripts}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{imports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{exports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{models}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' }} +#' \item{notebooks}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. 
+#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item currentDeploymentId integer, +#' \item lastDeploy object, +#' }} +#' \item{services}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item currentDeploymentId integer, +#' \item lastDeploy object, +#' }} +#' \item{workflows}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' \item lastExecution object, +#' }} +#' \item{reports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' }} +#' \item{scriptTemplates}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' }} +#' \item{files}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item fileName string, +#' \item fileSize integer, +#' \item expired boolean, +#' }} +#' \item{enhancements}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The item's ID. #' \item createdAt string, #' \item updatedAt string, #' \item name string, -#' \item slug string, +#' \item lastRun object, #' }} #' \item{projects}{array, An array containing the following fields: #' \itemize{ @@ -15486,10 +17238,13 @@ projects_post <- function(name, description, note = NULL, hidden = NULL) { #' \item icon string, #' \item author string, #' \item updatedAt string, +#' \item autoShare boolean, #' \item archived string, The archival status of the requested item(s). #' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". #' }} #' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{parentProject}{list, A list containing the following elements: @@ -15497,6 +17252,7 @@ projects_post <- function(name, description, note = NULL, hidden = NULL) { #' \item id integer, The parent project's ID. #' \item name integer, The parent project's name. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export projects_get <- function(project_id) { @@ -15520,7 +17276,6 @@ projects_get <- function(project_id) { #' @param name string optional. The name of this project. #' @param description string optional. A description of the project. #' @param note string optional. Notes for the project. -#' @param auto_share boolean optional. A toggle for sharing the objects within the project when the project is shared.This does not automatically share new objects to the project. 
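# --- Illustrative usage (editor's sketch, not part of the generated client) ---
# The projects_post_clone() wrapper introduced in this patch copies an existing
# project. A valid CIVIS_API_KEY and the project ID (123) are placeholder
# assumptions for the sketch.
library(civis)
cloned <- projects_post_clone(
  id = 123,                     # ID of the project to clone (placeholder)
  clone_schedule = TRUE,        # also copy schedules on applicable objects
  clone_notifications = FALSE   # do not copy notification settings
)
cloned$id  # ID of the newly created project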
#' #' @return A list containing the following elements: #' \item{id}{integer, The ID for this project.} @@ -15660,14 +17415,6 @@ projects_get <- function(project_id) { #' \item name string, #' \item lastRun object, #' }} -#' \item{appInstances}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The item's ID. -#' \item createdAt string, -#' \item updatedAt string, -#' \item name string, -#' \item slug string, -#' }} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The item's ID. @@ -15687,10 +17434,13 @@ projects_get <- function(project_id) { #' \item icon string, #' \item author string, #' \item updatedAt string, +#' \item autoShare boolean, #' \item archived string, The archival status of the requested item(s). #' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". #' }} #' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{parentProject}{list, A list containing the following elements: @@ -15698,14 +17448,15 @@ projects_get <- function(project_id) { #' \item id integer, The parent project's ID. #' \item name integer, The parent project's name. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export -projects_put <- function(project_id, name = NULL, description = NULL, note = NULL, auto_share = NULL) { +projects_put <- function(project_id, name = NULL, description = NULL, note = NULL) { args <- as.list(match.call())[-1] path <- "/projects/{project_id}" path_params <- list(project_id = project_id) query_params <- list() - body_params <- list(name = name, description = description, note = note, autoShare = auto_share) + body_params <- list(name = name, description = description, note = note) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -15738,6 +17489,200 @@ projects_delete <- function(project_id) { } +#' Enable or disable Auto-Share on a project +#' @param project_id integer required. +#' @param auto_share boolean required. A toggle for sharing the objects within the project when the project is shared or objects are added. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for this project.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{name}{string, The name of this project.} +#' \item{description}{string, A description of the project.} +#' \item{users}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
+#' }} +#' \item{autoShare}{boolean, } +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{tables}{array, An array containing the following fields: +#' \itemize{ +#' \item schema string, +#' \item name string, +#' \item rowCount integer, +#' \item columnCount integer, +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{surveys}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{scripts}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{imports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{exports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item type string, +#' \item finishedAt string, +#' \item state string, +#' \item lastRun object, +#' }} +#' \item{models}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' }} +#' \item{notebooks}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item currentDeploymentId integer, +#' \item lastDeploy object, +#' }} +#' \item{services}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item currentDeploymentId integer, +#' \item lastDeploy object, +#' }} +#' \item{workflows}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' \item lastExecution object, +#' }} +#' \item{reports}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item state string, +#' }} +#' \item{scriptTemplates}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' }} +#' \item{files}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item fileName string, +#' \item fileSize integer, +#' \item expired boolean, +#' }} +#' \item{enhancements}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. +#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item lastRun object, +#' }} +#' \item{projects}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The item's ID. 
+#' \item createdAt string, +#' \item updatedAt string, +#' \item name string, +#' \item description string, +#' }} +#' \item{allObjects}{array, An array containing the following fields: +#' \itemize{ +#' \item projectId integer, +#' \item objectId integer, +#' \item objectType string, +#' \item fcoType string, +#' \item subType string, +#' \item name string, +#' \item icon string, +#' \item author string, +#' \item updatedAt string, +#' \item autoShare boolean, +#' \item archived string, The archival status of the requested item(s). +#' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". +#' }} +#' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{parentProject}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The parent project's ID. +#' \item name integer, The parent project's name. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' @export +projects_put_auto_share <- function(project_id, auto_share) { + + args <- as.list(match.call())[-1] + path <- "/projects/{project_id}/auto_share" + path_params <- list(project_id = project_id) + query_params <- list() + body_params <- list(autoShare = auto_share) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -15909,6 +17854,70 @@ projects_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +projects_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/projects/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. 
ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +projects_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/projects/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Update the archive status of this object #' @param id integer required. The ID of the object. #' @param status boolean required. The desired archived status of the object. @@ -16051,14 +18060,6 @@ projects_delete_shares_groups <- function(id, group_id) { #' \item name string, #' \item lastRun object, #' }} -#' \item{appInstances}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The item's ID. -#' \item createdAt string, -#' \item updatedAt string, -#' \item name string, -#' \item slug string, -#' }} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The item's ID. @@ -16078,10 +18079,13 @@ projects_delete_shares_groups <- function(id, group_id) { #' \item icon string, #' \item author string, #' \item updatedAt string, +#' \item autoShare boolean, #' \item archived string, The archival status of the requested item(s). #' \item hidden boolean, The hidden status of the item. +#' \item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". #' }} #' \item{note}{string, } +#' \item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{parentProject}{list, A list containing the following elements: @@ -16089,6 +18093,7 @@ projects_delete_shares_groups <- function(id, group_id) { #' \item id integer, The parent project's ID. #' \item name integer, The parent project's name. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' @export projects_put_archive <- function(id, status) { @@ -16199,15 +18204,22 @@ projects_delete_parent_projects <- function(id, parent_project_id) { } -#' List +#' List queries +#' @param query string optional. Space delimited query for searching queries by their SQL. Supports wild card characters "?" for any single character, and "*" for zero or more characters. #' @param database_id integer optional. The database ID. +#' @param credential_id integer optional. The credential ID. #' @param author_id integer optional. The author of the query. #' @param created_before string optional. An upper bound for the creation date of the query. +#' @param created_after string optional. A lower bound for the creation date of the query. +#' @param started_before string optional. An upper bound for the start date of the last run. +#' @param started_after string optional. A lower bound for the start date of the last run. +#' @param state array optional. The state of the last run. One or more of queued, running, succeeded, failed, and cancelled. Specify multiple values as a comma-separated list (e.g., "A,B"). #' @param exclude_results boolean optional. If true, does not return cached query results. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param archived string optional. The archival status of the requested item(s). #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at. +#' @param order string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, started_at. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: @@ -16222,19 +18234,20 @@ projects_delete_parent_projects <- function(id, parent_project_id) { #' \item{error}{string, The error message for this run, if present.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{startedAt}{string, The start time of the last run.} #' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} +#' \item{state}{string, The state of the last run. 
One of queued, running, succeeded, failed, and cancelled.} #' \item{lastRunId}{integer, The ID of the last run.} +#' \item{archived}{string, The archival status of the requested item(s).} #' \item{previewRows}{integer, The number of rows to save from the query's result (maximum: 100).} -#' \item{startedAt}{string, The start time of the last run.} #' \item{reportId}{integer, The ID of the report associated with this query.} #' @export -queries_list <- function(database_id = NULL, author_id = NULL, created_before = NULL, exclude_results = NULL, hidden = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +queries_list <- function(query = NULL, database_id = NULL, credential_id = NULL, author_id = NULL, created_before = NULL, created_after = NULL, started_before = NULL, started_after = NULL, state = NULL, exclude_results = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/queries/" path_params <- list() - query_params <- list(database_id = database_id, author_id = author_id, created_before = created_before, exclude_results = exclude_results, hidden = hidden, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(query = query, database_id = database_id, credential_id = credential_id, author_id = author_id, created_before = created_before, created_after = created_after, started_before = started_before, started_after = started_after, state = state, exclude_results = exclude_results, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -16271,10 +18284,13 @@ queries_list <- function(database_id = NULL, author_id = NULL, created_before = #' \item{error}{string, The error message for this run, if present.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{startedAt}{string, The start time of the last run.} #' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} +#' \item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} #' \item{lastRunId}{integer, The ID of the last run.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{interactive}{boolean, Deprecated and not used.} #' \item{previewRows}{integer, The number of rows to save from the query's result (maximum: 100).} #' \item{includeHeader}{boolean, Whether the CSV output should include a header row [default: true].} @@ -16282,7 +18298,6 @@ queries_list <- function(database_id = NULL, author_id = NULL, created_before = #' \item{columnDelimiter}{string, The delimiter to use. 
One of comma or tab, or pipe [default: comma].} #' \item{unquoted}{boolean, If true, will not quote fields.} #' \item{filenamePrefix}{string, The output filename prefix.} -#' \item{startedAt}{string, The start time of the last run.} #' \item{reportId}{integer, The ID of the report associated with this query.} #' @export queries_post <- function(database, sql, preview_rows, credential = NULL, hidden = NULL, interactive = NULL, include_header = NULL, compression = NULL, column_delimiter = NULL, unquoted = NULL, filename_prefix = NULL) { @@ -16310,8 +18325,9 @@ queries_post <- function(database, sql, preview_rows, credential = NULL, hidden #' \item{queryId}{integer, The ID of the query.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export queries_post_runs <- function(id) { @@ -16343,8 +18359,9 @@ queries_post_runs <- function(id) { #' \item{queryId}{integer, The ID of the query.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export queries_list_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -16373,8 +18390,9 @@ queries_list_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, o #' \item{queryId}{integer, The ID of the query.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export queries_get_runs <- function(id, run_id) { @@ -16462,10 +18480,12 @@ queries_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { #' \item{error}{string, The error message for this run, if present.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{startedAt}{string, The start time of the last run.} #' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} +#' \item{state}{string, The state of the last run. 
One of queued, running, succeeded, failed, and cancelled.} #' \item{lastRunId}{integer, The ID of the last run.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} #' \item{name}{string, The name of the query.} #' \item{author}{list, A list containing the following elements: #' \itemize{ @@ -16475,7 +18495,6 @@ queries_list_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{startedAt}{string, The start time of the last run.} #' \item{reportId}{integer, The ID of the report associated with this query.} #' @export queries_put_scripts <- function(id, script_id) { @@ -16510,10 +18529,12 @@ queries_put_scripts <- function(id, script_id) { #' \item{error}{string, The error message for this run, if present.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{startedAt}{string, The start time of the last run.} #' \item{finishedAt}{string, The end time of the last run.} -#' \item{state}{string, The state of the last run.} +#' \item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} #' \item{lastRunId}{integer, The ID of the last run.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} #' \item{name}{string, The name of the query.} #' \item{author}{list, A list containing the following elements: #' \itemize{ @@ -16523,7 +18544,6 @@ queries_put_scripts <- function(id, script_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{startedAt}{string, The start time of the last run.} #' \item{reportId}{integer, The ID of the report associated with this query.} #' @export queries_get <- function(id) { @@ -16543,14 +18563,63 @@ queries_get <- function(id) { } -#' List the remote hosts -#' @param type string optional. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce +#' Sets Query Hidden to true +#' @param id integer required. The query ID. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The query ID.} +#' \item{database}{integer, The database ID.} +#' \item{sql}{string, The SQL to execute.} +#' \item{credential}{integer, The credential ID.} +#' \item{resultRows}{array, A preview of rows returned by the query.} +#' \item{resultColumns}{array, A preview of columns returned by the query.} +#' \item{scriptId}{integer, The ID of the script associated with this query.} +#' \item{exception}{string, Deprecated and not used.} +#' \item{error}{string, The error message for this run, if present.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{startedAt}{string, The start time of the last run.} +#' \item{finishedAt}{string, The end time of the last run.} +#' \item{state}{string, The state of the last run. 
One of queued, running, succeeded, failed, and cancelled.} +#' \item{lastRunId}{integer, The ID of the last run.} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{name}{string, The name of the query.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{reportId}{integer, The ID of the report associated with this query.} +#' @export +queries_delete <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/queries/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List Remote Hosts +#' @param type string optional. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce #' #' @return An array containing the following fields: #' \item{id}{integer, The ID of the remote host.} -#' \item{name}{string, The name of the remote host.} -#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} -#' \item{url}{string, The URL for remote host.} +#' \item{name}{string, The human readable name for the remote host.} +#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +#' \item{url}{string, The URL for the remote host.} #' @export remote_hosts_list <- function(type = NULL) { @@ -16569,16 +18638,28 @@ remote_hosts_list <- function(type = NULL) { } -#' Create a new remote host +#' Create a Remote Host #' @param name string required. The human readable name for the remote host. -#' @param url string required. The URL to your host. -#' @param type string required. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce +#' @param url string required. The URL for the remote host. +#' @param type string required. The type of remote host. 
One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce #' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the remote host.} -#' \item{name}{string, The name of the remote host.} -#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} -#' \item{url}{string, The URL for remote host.} +#' \item{name}{string, The human readable name for the remote host.} +#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +#' \item{url}{string, The URL for the remote host.} +#' \item{description}{string, The description of the remote host.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } #' @export remote_hosts_post <- function(name, url, type) { @@ -16597,6 +18678,299 @@ remote_hosts_post <- function(name, url, type) { } +#' Get a Remote Host +#' @param id integer required. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the remote host.} +#' \item{name}{string, The human readable name for the remote host.} +#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +#' \item{url}{string, The URL for the remote host.} +#' \item{description}{string, The description of the remote host.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
+#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +remote_hosts_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Replace all attributes of this Remote Host +#' @param id integer required. The ID of the remote host. +#' @param name string required. The human readable name for the remote host. +#' @param type string required. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce +#' @param url string required. The URL for the remote host. +#' @param description string required. The description of the remote host. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the remote host.} +#' \item{name}{string, The human readable name for the remote host.} +#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +#' \item{url}{string, The URL for the remote host.} +#' \item{description}{string, The description of the remote host.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +remote_hosts_put <- function(id, name, type, url, description) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(name = name, type = type, url = url, description = description) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update some attributes of this Remote Host +#' @param id integer required. The ID of the remote host. +#' @param name string optional. The human readable name for the remote host. +#' @param type string optional. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce +#' @param url string optional. The URL for the remote host. +#' @param description string optional. 
The description of the remote host. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the remote host.} +#' \item{name}{string, The human readable name for the remote host.} +#' \item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +#' \item{url}{string, The URL for the remote host.} +#' \item{description}{string, The description of the remote host.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +remote_hosts_patch <- function(id, name = NULL, type = NULL, url = NULL, description = NULL) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(name = name, type = type, url = url, description = description) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List users and groups permissioned on this object +#' @param id integer required. The ID of the resource that is shared. +#' +#' @return An array containing the following fields: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +remote_hosts_list_shares <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}/shares" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions users have on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_ids array required. An array of one or more user IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. 
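# --- Illustrative usage (editor's sketch, not part of the generated client) ---
# Registers a remote host with remote_hosts_post() and later updates its
# description with remote_hosts_patch(), both added in this patch. The name,
# URL, and RemoteHostTypes::Github type are placeholder assumptions; a valid
# CIVIS_API_KEY is required for the calls to succeed.
library(civis)
host <- remote_hosts_post(
  name = "Example GitHub",
  url  = "https://github.com/example-org",
  type = "RemoteHostTypes::Github"
)
remote_hosts_patch(host$id, description = "Org-wide GitHub remote host")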
+#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +remote_hosts_put_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}/shares/users" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. +#' +#' @return An empty HTTP response +#' @export +remote_hosts_delete_shares_users <- function(id, user_id) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' @export +remote_hosts_put_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}/shares/groups" + path_params <- list(id = id) + query_params <- list() + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Revoke the permissions a group has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_id integer required. The ID of the group. +#' +#' @return An empty HTTP response +#' @export +remote_hosts_delete_shares_groups <- function(id, group_id) { + + args <- as.list(match.call())[-1] + path <- "/remote_hosts/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Authenticate against a remote host using either a credential or a user name and password #' @param id integer required. The ID of the remote host. #' @param credential_id integer optional. The credential ID. @@ -16653,8 +19027,8 @@ remote_hosts_list_data_sets <- function(id, credential_id = NULL, username = NUL #' List Reports #' @param type string optional. If specified, return report of these types. It accepts a comma-separated list, possible values are 'tableau' or 'other'. -#' @param author string optional. If specified, return reports from this author. It accepts a comma-separated list of author ids. #' @param template_id integer optional. If specified, return reports using the provided Template. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. 
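# --- Illustrative usage (editor's sketch, not part of the generated client) ---
# Demonstrates the new queries_list() filters added above (query, state,
# started_after, archived, and ordering by started_at). The search text, date,
# and archived value are placeholders; a valid CIVIS_API_KEY is assumed.
library(civis)
failed_recent <- queries_list(
  query         = "orders_*",    # wildcard search over query SQL
  state         = "failed",
  started_after = "2023-01-01",
  archived      = "false",
  order         = "started_at",
  order_dir     = "desc"
)
length(failed_recent)  # number of matching queries returned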
@@ -16705,12 +19079,12 @@ remote_hosts_list_data_sets <- function(id, credential_id = NULL, username = NUL #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' @export -reports_list <- function(type = NULL, author = NULL, template_id = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +reports_list <- function(type = NULL, template_id = NULL, author = NULL, hidden = NULL, archived = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/reports/" path_params <- list() - query_params <- list(type = type, author = author, template_id = template_id, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(type = type, template_id = template_id, author = author, hidden = hidden, archived = archived, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -16744,6 +19118,7 @@ reports_list <- function(type = NULL, author = NULL, template_id = NULL, hidden #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID for the project. @@ -16805,7 +19180,7 @@ reports_post <- function(script_id = NULL, name = NULL, code_body = NULL, app_st #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -16815,7 +19190,8 @@ reports_post <- function(script_id = NULL, name = NULL, code_body = NULL, app_st #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export reports_list_git <- function(id) { @@ -16840,10 +19216,11 @@ reports_list_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -16853,15 +19230,16 @@ reports_list_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -reports_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +reports_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/reports/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -16872,7 +19250,47 @@ reports_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NU } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +reports_patch_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/reports/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: @@ -16927,7 +19345,7 @@ reports_post_git_commits <- function(id, content, message, file_hash) { } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -16954,6 +19372,58 @@ reports_get_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +reports_post_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/reports/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +reports_post_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/reports/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Show a single report #' @param id integer required. The ID of this report. #' @@ -16970,6 +19440,7 @@ reports_get_git_commits <- function(id, commit_hash) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID for the project. @@ -17051,6 +19522,7 @@ reports_get <- function(id) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID for the project. @@ -17124,6 +19596,7 @@ reports_patch <- function(id, name = NULL, script_id = NULL, code_body = NULL, c #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID for the project. @@ -17374,6 +19847,70 @@ reports_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +reports_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/reports/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
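#
# A minimal usage sketch of the ownership-transfer endpoints documented above,
# assuming a configured Civis API key; report 123 and user 456 are hypothetical IDs.
library(civis)

# Inspect which dependent objects the target user would also need access to.
deps <- reports_list_dependencies(id = 123, user_id = 456)

# Transfer the report, granting manage permission on its dependencies as well.
transfer <- reports_put_transfer(
  id = 123,
  user_id = 456,
  include_dependencies = TRUE,
  email_body = "Handing this report over to you.",
  send_email = TRUE
)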
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +reports_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/reports/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Report belongs to #' @param id integer required. The ID of the Report. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -17483,6 +20020,7 @@ reports_delete_projects <- function(id, project_id) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{projects}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID for the project. @@ -17556,12 +20094,14 @@ reports_put_archive <- function(id, status) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{host}{string, The host for the service report} #' \item{displayUrl}{string, The URL to display the service report.} #' \item{serviceId}{integer, The id of the backing service} #' \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} #' \item{apiKey}{string, A Civis API key that can be used by this report.} #' \item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export reports_get_services <- function(id) { @@ -17598,12 +20138,14 @@ reports_get_services <- function(id) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{host}{string, The host for the service report} #' \item{displayUrl}{string, The URL to display the service report.} #' \item{serviceId}{integer, The id of the backing service} #' \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} #' \item{apiKey}{string, A Civis API key that can be used by this report.} #' \item{apiKeyId}{integer, The ID of the API key. 
Can be used for auditing API use by this report.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export reports_patch_services <- function(id, name = NULL, provide_api_key = NULL) { @@ -17639,12 +20181,14 @@ reports_patch_services <- function(id, name = NULL, provide_api_key = NULL) { #' }} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{host}{string, The host for the service report} #' \item{displayUrl}{string, The URL to display the service report.} #' \item{serviceId}{integer, The id of the backing service} #' \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} #' \item{apiKey}{string, A Civis API key that can be used by this report.} #' \item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +#' \item{archived}{string, The archival status of the requested item(s).} #' @export reports_post_services <- function(service_id, provide_api_key = NULL) { @@ -17834,6 +20378,70 @@ reports_delete_services_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +reports_list_services_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/reports/services/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
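#
# A short sketch of the service-report analogue, assuming a configured Civis API
# key and hypothetical IDs: list the dependent objects of a service report and
# check whether the requesting user may share each one before attempting a transfer.
library(civis)

svc_deps <- reports_list_services_dependencies(id = 789, user_id = 456)
all_shareable <- all(vapply(svc_deps, function(d) isTRUE(d$shareable), logical(1)))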
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +reports_put_services_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/reports/services/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Service Report belongs to #' @param id integer required. The ID of the Service Report. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -17926,6 +20534,49 @@ reports_delete_services_projects <- function(id, project_id) { } +#' Update the archive status of this object +#' @param id integer required. The ID of the object. +#' @param status boolean required. The desired archived status of the object. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this report.} +#' \item{name}{string, The name of the report.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{host}{string, The host for the service report} +#' \item{displayUrl}{string, The URL to display the service report.} +#' \item{serviceId}{integer, The id of the backing service} +#' \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} +#' \item{apiKey}{string, A Civis API key that can be used by this report.} +#' \item{apiKeyId}{integer, The ID of the API key. 
Can be used for auditing API use by this report.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' @export +reports_put_services_archive <- function(id, status) { + + args <- as.list(match.call())[-1] + path <- "/reports/services/{id}/archive" + path_params <- list(id = id) + query_params <- list() + body_params <- list(status = status) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Refresh the data in this Tableau report #' @param id integer required. The ID of this report. #' @@ -17956,6 +20607,35 @@ reports_post_refresh <- function(id) { } +#' List Roles +#' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to id. Must be one of: id. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, ID of the Role.} +#' \item{name}{string, The name of the Role.} +#' \item{slug}{string, The slug.} +#' \item{description}{string, The description of the Role.} +#' @export +roles_list <- function(limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/roles/" + path_params <- list() + query_params <- list(limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -18265,10 +20945,11 @@ scripts_list_history <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -18302,6 +20983,7 @@ scripts_list_history <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -18327,7 +21009,7 @@ scripts_post <- function(name, remote_host_id, credential_id, sql, params = NULL #' List Scripts #' @param type string optional. If specified, return items of these types. The valid types are sql, python3, javascript, r, and containers. #' @param category string optional. A job category for filtering scripts. Must be one of script, import, export, and enhancement. -#' @param author string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param status string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). @@ -18415,10 +21097,11 @@ scripts_list <- function(type = NULL, category = NULL, author = NULL, status = N #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -18434,6 +21117,7 @@ scripts_list <- function(type = NULL, category = NULL, author = NULL, status = N #' \item failureOn boolean, If failure email notifications are on. #' } #' @param parent_id integer optional. The ID of the parent job that will trigger this script +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -18484,10 +21168,11 @@ scripts_list <- function(type = NULL, category = NULL, author = NULL, status = N #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -18521,6 +21206,7 @@ scripts_list <- function(type = NULL, category = NULL, author = NULL, status = N #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -18528,13 +21214,13 @@ scripts_list <- function(type = NULL, category = NULL, author = NULL, status = N #' \item{expandedArguments}{list, Expanded arguments for use in injecting into different environments.} #' \item{templateScriptId}{integer, The ID of the template script, if any.} #' @export -scripts_patch <- function(id, name = NULL, sql = NULL, params = NULL, arguments = NULL, template_script_id = NULL, schedule = NULL, notifications = NULL, parent_id = NULL) { +scripts_patch <- function(id, name = NULL, sql = NULL, params = NULL, arguments = NULL, template_script_id = NULL, schedule = NULL, notifications = NULL, parent_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, sql = sql, params = params, arguments = arguments, templateScriptId = template_script_id, schedule = schedule, notifications = notifications, parentId = parent_id) + body_params <- list(name = name, sql = sql, params = params, arguments = arguments, templateScriptId = template_script_id, schedule = schedule, notifications = notifications, parentId = parent_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -18619,10 +21305,11 @@ scripts_delete <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -18656,6 +21343,7 @@ scripts_delete <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. 
#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -18730,10 +21418,9 @@ scripts_post_cancel <- function(id) { #' Create a container #' @param required_resources list required. A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. -#' \item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. #' } #' @param docker_image_name string required. The name of the docker image to pull from DockerHub. #' @param name string optional. The name of the container. @@ -18754,10 +21441,11 @@ scripts_post_cancel <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -18781,11 +21469,19 @@ scripts_post_cancel <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param time_zone string optional. The time zone of this script. +#' @param partition_label string optional. The partition label used to run this object. #' @param hidden boolean optional. The hidden status of the item. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param running_as_id integer optional. The ID of the runner of this script. 
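#
# A minimal sketch of creating a container script with the day-of-month schedule
# and runner fields, assuming a configured Civis API key; the image, command,
# resource sizes, and IDs below are illustrative values only.
library(civis)

container <- scripts_post_containers(
  required_resources = list(cpu = 1000, memory = 2048, diskSpace = 1),
  docker_image_name = "civisanalytics/datascience-r",
  docker_command = "Rscript /app/refresh.R",
  name = "Monthly refresh",
  schedule = list(
    scheduled = TRUE,
    scheduledDaysOfMonth = list(1),  # run on the 1st of each month
    scheduledHours = list(6),
    scheduledMinutes = list(0)
  ),
  running_as_id = 456
)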
#' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -18833,10 +21529,11 @@ scripts_post_cancel <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -18861,8 +21558,8 @@ scripts_post_cancel <- function(id) { #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -18884,17 +21581,20 @@ scripts_post_cancel <- function(id) { #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_containers <- function(required_resources, docker_image_name, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL) { +scripts_post_containers <- function(required_resources, docker_image_name, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, partition_label = NULL, hidden = NULL, target_project_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/containers" path_params <- list() query_params <- list() - body_params <- list(requiredResources = required_resources, dockerImageName = docker_image_name, name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id) + body_params <- list(requiredResources = required_resources, dockerImageName = docker_image_name, name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, partitionLabel = partition_label, hidden = hidden, targetProjectId = target_project_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -18910,6 +21610,12 @@ scripts_post_containers <- function(required_resources, docker_image_name, name #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. 
+#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -18957,10 +21663,11 @@ scripts_post_containers <- function(required_resources, docker_image_name, name #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -18985,8 +21692,8 @@ scripts_post_containers <- function(required_resources, docker_image_name, name #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -19008,9 +21715,12 @@ scripts_post_containers <- function(required_resources, docker_image_name, name #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_containers <- function(id) { @@ -19033,10 +21743,9 @@ scripts_get_containers <- function(id) { #' @param id integer required. The ID for the script. #' @param required_resources list required. A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). 
+#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. -#' \item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. #' } #' @param docker_image_name string required. The name of the docker image to pull from DockerHub. #' @param name string optional. The name of the container. @@ -19057,10 +21766,11 @@ scripts_get_containers <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -19084,10 +21794,18 @@ scripts_get_containers <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param time_zone string optional. The time zone of this script. +#' @param partition_label string optional. The partition label used to run this object. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -19135,10 +21853,11 @@ scripts_get_containers <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -19163,8 +21882,8 @@ scripts_get_containers <- function(id) { #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -19186,17 +21905,20 @@ scripts_get_containers <- function(id) { #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_put_containers <- function(id, required_resources, docker_image_name, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, target_project_id = NULL) { +scripts_put_containers <- function(id, required_resources, docker_image_name, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, partition_label = NULL, target_project_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/containers/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(requiredResources = required_resources, dockerImageName = docker_image_name, name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, targetProjectId = target_project_id) + body_params <- list(requiredResources = required_resources, dockerImageName = docker_image_name, name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, partitionLabel = partition_label, targetProjectId = target_project_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -19227,10 +21949,11 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -19247,10 +21970,9 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' } #' @param required_resources list optional. A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. -#' \item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. #' } #' @param repo_http_uri string optional. The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git. #' @param repo_ref string optional. The tag or branch of the github repo to clone into the container. @@ -19262,10 +21984,18 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param time_zone string optional. The time zone of this script. +#' @param partition_label string optional. The partition label used to run this object. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -19313,10 +22043,11 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. 
#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -19341,8 +22072,8 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -19364,17 +22095,20 @@ scripts_put_containers <- function(id, required_resources, docker_image_name, na #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_containers <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, required_resources = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, target_project_id = NULL) { +scripts_patch_containers <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, required_resources = NULL, repo_http_uri = NULL, repo_ref = NULL, remote_host_credential_id = NULL, git_credential_id = NULL, docker_command = NULL, docker_image_name = NULL, docker_image_tag = NULL, instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, partition_label = NULL, target_project_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/containers/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, requiredResources = required_resources, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, targetProjectId = target_project_id) + body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, requiredResources = required_resources, repoHttpUri = repo_http_uri, repoRef = repo_ref, remoteHostCredentialId = remote_host_credential_id, gitCredentialId = git_credential_id, dockerCommand = docker_command, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, instanceType = instance_type, cancelTimeout = cancel_timeout, timeZone = time_zone, partitionLabel = partition_label, targetProjectId = target_project_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -19490,10 +22224,11 @@ scripts_list_containers_runs_logs <- function(id, run_id, last_id = NULL, limit #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -19522,6 +22257,7 @@ scripts_list_containers_runs_logs <- function(id, run_id, last_id = NULL, limit #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' } +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -19572,10 +22308,11 @@ scripts_list_containers_runs_logs <- function(id, run_id, last_id = NULL, limit #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -19609,6 +22346,7 @@ scripts_list_containers_runs_logs <- function(id, run_id, last_id = NULL, limit #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -19627,14 +22365,15 @@ scripts_list_containers_runs_logs <- function(id, run_id, last_id = NULL, limit #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. 
#' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_sql <- function(name, sql, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, csv_settings = NULL) { +scripts_post_sql <- function(name, sql, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, csv_settings = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/sql" path_params <- list() query_params <- list() - body_params <- list(name = name, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, csvSettings = csv_settings) + body_params <- list(name = name, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, csvSettings = csv_settings, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -19697,10 +22436,11 @@ scripts_post_sql <- function(name, sql, remote_host_id, credential_id, parent_id #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -19734,6 +22474,7 @@ scripts_post_sql <- function(name, sql, remote_host_id, credential_id, parent_id #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -19752,6 +22493,7 @@ scripts_post_sql <- function(name, sql, remote_host_id, credential_id, parent_id #' \item filenamePrefix string, A user specified filename prefix for the output file to have. 
Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_sql <- function(id) { @@ -19793,10 +22535,11 @@ scripts_get_sql <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -19824,6 +22567,7 @@ scripts_get_sql <- function(id) { #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' } +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -19874,10 +22618,11 @@ scripts_get_sql <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -19911,6 +22656,7 @@ scripts_get_sql <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -19929,14 +22675,15 @@ scripts_get_sql <- function(id) { #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. 
#' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, csv_settings = NULL) { +scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, csv_settings = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/sql/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, csvSettings = csv_settings) + body_params <- list(name = name, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, csvSettings = csv_settings, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -19967,10 +22714,11 @@ scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -20001,6 +22749,7 @@ scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' } +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -20051,10 +22800,11 @@ scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. 
-#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20088,6 +22838,7 @@ scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20106,14 +22857,15 @@ scripts_put_sql <- function(id, name, sql, remote_host_id, credential_id, parent #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_sql <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, sql = NULL, remote_host_id = NULL, credential_id = NULL, csv_settings = NULL) { +scripts_patch_sql <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, sql = NULL, remote_host_id = NULL, credential_id = NULL, csv_settings = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/sql/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, csvSettings = csv_settings) + body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, sql = sql, remoteHostId = remote_host_id, credentialId = credential_id, csvSettings = csv_settings, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -20166,10 +22918,11 @@ scripts_delete_sql <- function(id) { #' @param schedule list optional. 
A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -20197,6 +22950,8 @@ scripts_delete_sql <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -20247,10 +23002,11 @@ scripts_delete_sql <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20284,6 +23040,7 @@ scripts_delete_sql <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20297,14 +23054,16 @@ scripts_delete_sql <- function(id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. 
If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_python3 <- function(name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_post_python3 <- function(name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/python3" path_params <- list() query_params <- list() - body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -20367,10 +23126,11 @@ scripts_post_python3 <- function(name, source, parent_id = NULL, user_context = #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20404,6 +23164,7 @@ scripts_post_python3 <- function(name, source, parent_id = NULL, user_context = #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. 
#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20417,6 +23178,8 @@ scripts_post_python3 <- function(name, source, parent_id = NULL, user_context = #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_python3 <- function(id) { @@ -20456,10 +23219,11 @@ scripts_get_python3 <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -20486,6 +23250,8 @@ scripts_get_python3 <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -20536,10 +23302,11 @@ scripts_get_python3 <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20573,6 +23340,7 @@ scripts_get_python3 <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20586,14 +23354,16 @@ scripts_get_python3 <- function(id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/python3/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -20624,10 +23394,11 @@ scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context #' @param schedule list optional. 
A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -20655,6 +23426,8 @@ scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context #' @param source string optional. The body/text of the script. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -20705,10 +23478,11 @@ scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20742,6 +23516,7 @@ scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20755,14 +23530,16 @@ scripts_put_python3 <- function(id, name, source, parent_id = NULL, user_context #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. 
When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_python3 <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, source = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_patch_python3 <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, source = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/python3/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, source = source, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, source = source, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -20815,10 +23592,11 @@ scripts_delete_python3 <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -20846,6 +23624,8 @@ scripts_delete_python3 <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. 
The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -20896,10 +23676,11 @@ scripts_delete_python3 <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -20933,6 +23714,7 @@ scripts_delete_python3 <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -20946,14 +23728,16 @@ scripts_delete_python3 <- function(id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. 
} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_r <- function(name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_post_r <- function(name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/r" path_params <- list() query_params <- list() - body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -21016,10 +23800,11 @@ scripts_post_r <- function(name, source, parent_id = NULL, user_context = NULL, #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21053,6 +23838,7 @@ scripts_post_r <- function(name, source, parent_id = NULL, user_context = NULL, #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -21066,6 +23852,8 @@ scripts_post_r <- function(name, source, parent_id = NULL, user_context = NULL, #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_r <- function(id) { @@ -21105,10 +23893,11 @@ scripts_get_r <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -21135,6 +23924,8 @@ scripts_get_r <- function(id) { #' @param instance_type string optional. The EC2 instance type to deploy to. Only available for jobs running on kubernetes. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -21185,10 +23976,11 @@ scripts_get_r <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. 
+#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21222,6 +24014,7 @@ scripts_get_r <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -21235,14 +24028,16 @@ scripts_get_r <- function(id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/r/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, source = source, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -21273,10 +24068,11 @@ scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NUL #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. 
-#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -21304,6 +24100,8 @@ scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NUL #' @param source string optional. The body/text of the script. #' @param cancel_timeout integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0. #' @param docker_image_tag string optional. The tag of the docker image to pull from DockerHub. +#' @param partition_label string optional. The partition label used to run this object. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -21354,10 +24152,11 @@ scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NUL #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21391,6 +24190,7 @@ scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NUL #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -21404,14 +24204,16 @@ scripts_put_r <- function(id, name, source, parent_id = NULL, user_context = NUL #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. 
If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_r <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, source = NULL, cancel_timeout = NULL, docker_image_tag = NULL) { +scripts_patch_r <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, instance_type = NULL, source = NULL, cancel_timeout = NULL, docker_image_tag = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/r/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, source = source, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag) + body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, instanceType = instance_type, source = source, cancelTimeout = cancel_timeout, dockerImageTag = docker_image_tag, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -21466,10 +24268,11 @@ scripts_delete_r <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -21488,6 +24291,7 @@ scripts_delete_r <- function(id) { #' @param time_zone string optional. The time zone of this script. #' @param hidden boolean optional. The hidden status of the item. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param running_as_id integer optional. 
The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -21538,10 +24342,11 @@ scripts_delete_r <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21575,20 +24380,22 @@ scripts_delete_r <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_javascript <- function(name, source, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL) { +scripts_post_javascript <- function(name, source, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/javascript" path_params <- list() query_params <- list() - body_params <- list(name = name, source = source, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id) + body_params <- list(name = name, source = source, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -21651,10 +24458,11 @@ 
scripts_post_javascript <- function(name, source, remote_host_id, credential_id, #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21688,12 +24496,14 @@ scripts_post_javascript <- function(name, source, remote_host_id, credential_id, #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_javascript <- function(id) { @@ -21735,10 +24545,11 @@ scripts_get_javascript <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -21756,6 +24567,7 @@ scripts_get_javascript <- function(id) { #' @param next_run_at string optional. The time of the next scheduled run. #' @param time_zone string optional. The time zone of this script. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param running_as_id integer optional. The ID of the runner of this script. 
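For context, a minimal usage sketch of the behavior these hunks add (this is not part of the generated client or of the patch itself; the IDs, script source, and schedule values below are placeholders) showing how the new `running_as_id` argument and the new `scheduledDaysOfMonth` schedule field surface to a caller:

library(civis)

# Hypothetical IDs; substitute a real remote host, credential, and user ID.
resp <- scripts_post_javascript(
  name           = "nightly-cleanup",
  source         = "console.log('hello');",
  remote_host_id = 123,
  credential_id  = 456,
  running_as_id  = 789,                 # new in this patch: run the script as this user
  schedule       = list(
    scheduled            = TRUE,
    scheduledDaysOfMonth = list(1),     # new in this patch: mutually exclusive with scheduledDays
    scheduledHours       = list(2),
    scheduledMinutes     = list(0)
  )
)

resp$runningAsId   # if the sketch matches the generated client, the new field is echoed back

The SQL, Python, R, and container variants of these endpoints gain the same `running_as_id` body parameter in this patch, so analogous calls to those functions should accept it in the same way.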
#' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -21806,10 +24618,11 @@ scripts_get_javascript <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21843,20 +24656,22 @@ scripts_get_javascript <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_put_javascript <- function(id, name, source, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL) { +scripts_put_javascript <- function(id, name, source, remote_host_id, credential_id, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/javascript/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, source = source, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id) + body_params <- list(name = name, source = source, remoteHostId = remote_host_id, credentialId = credential_id, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -21887,10 +24702,11 @@ scripts_put_javascript <- function(id, name, source, remote_host_id, 
credential_ #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -21911,6 +24727,7 @@ scripts_put_javascript <- function(id, name, source, remote_host_id, credential_ #' @param source string optional. The body/text of the script. #' @param remote_host_id integer optional. The remote host ID that this script will connect to. #' @param credential_id integer optional. The credential that this script will use. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} @@ -21961,10 +24778,11 @@ scripts_put_javascript <- function(id, name, source, remote_host_id, credential_ #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -21998,20 +24816,22 @@ scripts_put_javascript <- function(id, name, source, remote_host_id, credential_ #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_javascript <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, source = NULL, remote_host_id = NULL, credential_id = NULL) { +scripts_patch_javascript <- function(id, name = NULL, parent_id = NULL, user_context = NULL, params = NULL, arguments = NULL, schedule = NULL, notifications = NULL, next_run_at = NULL, time_zone = NULL, target_project_id = NULL, source = NULL, remote_host_id = NULL, credential_id = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/javascript/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, source = source, remoteHostId = remote_host_id, credentialId = credential_id) + body_params <- list(name = name, parentId = parent_id, userContext = user_context, params = params, arguments = arguments, schedule = schedule, notifications = notifications, nextRunAt = next_run_at, timeZone = time_zone, targetProjectId = target_project_id, source = source, remoteHostId = remote_host_id, credentialId = credential_id, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -22046,7 +24866,7 @@ scripts_delete_javascript <- function(id) { #' List Custom Scripts #' @param from_template_id string optional. If specified, return scripts based on the template with this ID. Specify multiple IDs as a comma-separated list. -#' @param author string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param status string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). @@ -22126,10 +24946,11 @@ scripts_list_custom <- function(from_template_id = NULL, author = NULL, status = #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -22147,9 +24968,23 @@ scripts_list_custom <- function(from_template_id = NULL, author = NULL, status = #' @param time_zone string optional. The time zone of this script. #' @param hidden boolean optional. The hidden status of the item. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param required_resources list optional. A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' } +#' @param partition_label string optional. The partition label used to run this object. Only applicable for jobs using Docker. +#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the script.} #' \item{type}{string, The type of the script (e.g Custom)} #' \item{createdAt}{string, The time this script was created.} @@ -22197,10 +25032,11 @@ scripts_list_custom <- function(from_template_id = NULL, author = NULL, status = #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -22233,6 +25069,7 @@ scripts_list_custom <- function(from_template_id = NULL, author = NULL, status = #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. 
#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -22245,14 +25082,22 @@ scripts_list_custom <- function(from_template_id = NULL, author = NULL, status = #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{requiredResources}{list, A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' }} +#' \item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL) { +scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, hidden = NULL, target_project_id = NULL, required_resources = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/custom" path_params <- list() query_params <- list() - body_params <- list(fromTemplateId = from_template_id, name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id) + body_params <- list(fromTemplateId = from_template_id, name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, hidden = hidden, targetProjectId = target_project_id, requiredResources = required_resources, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -22268,6 +25113,12 @@ scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. 
+#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the script.} #' \item{type}{string, The type of the script (e.g Custom)} #' \item{createdAt}{string, The time this script was created.} @@ -22315,10 +25166,11 @@ scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -22351,6 +25203,7 @@ scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -22363,6 +25216,14 @@ scripts_post_custom <- function(from_template_id, name = NULL, parent_id = NULL, #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{requiredResources}{list, A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' }} +#' \item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_get_custom <- function(id) { @@ -22391,10 +25252,182 @@ scripts_get_custom <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. 
+#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' } +#' @param notifications list optional. A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." +#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' } +#' @param time_zone string optional. The time zone of this script. +#' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param required_resources list optional. A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' } +#' @param partition_label string optional. The partition label used to run this object. Only applicable for jobs using Docker. +#' @param running_as_id integer optional. The ID of the runner of this script. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} +#' \item{name}{string, The name of the script.} +#' \item{type}{string, The type of the script (e.g Custom)} +#' \item{createdAt}{string, The time this script was created.} +#' \item{updatedAt}{string, The time the script was last updated.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{state}{string, The status of the script's last run.} +#' \item{finishedAt}{string, The time that the script's last run finished.} +#' \item{category}{string, } +#' \item{projects}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID for the project. +#' \item name string, The name of the project. 
+#' }} +#' \item{parentId}{integer, The ID of the parent job that will trigger this script} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. +#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} +#' \item{arguments}{list, Parameter-value pairs to use when running this script. Only settable if this script has defined parameters.} +#' \item{isTemplate}{boolean, Whether others scripts use this one as a template.} +#' \item{publishedAsTemplateId}{integer, The ID of the template that this script is backing.} +#' \item{fromTemplateId}{integer, The ID of the template script.} +#' \item{uiReportUrl}{integer, The url of the custom HTML.} +#' \item{uiReportId}{integer, The id of the report with the custom HTML.} +#' \item{uiReportProvideAPIKey}{boolean, Whether the ui report requests an API Key from the report viewer.} +#' \item{templateScriptName}{string, The name of the template script.} +#' \item{templateNote}{string, The template's note.} +#' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} +#' \item{credentialId}{integer, The credential that this script will use.} +#' \item{codePreview}{string, The code that this script will run with arguments inserted.} +#' \item{schedule}{list, A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth +#' \item scheduledHours array, Hours of the day it is scheduled on. +#' \item scheduledMinutes array, Minutes of the day it is scheduled on. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. +#' }} +#' \item{notifications}{list, A list containing the following elements: +#' \itemize{ +#' \item urls array, URLs to receive a POST request at job completion +#' \item successEmailSubject string, Custom subject line for success e-mail. +#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. +#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. +#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." 
+#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. +#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. +#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. +#' \item successOn boolean, If success email notifications are on. +#' \item failureOn boolean, If failure email notifications are on. +#' }} +#' \item{runningAs}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{timeZone}{string, The time zone of this script.} +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{hidden}{boolean, The hidden status of the item.} +#' \item{archived}{string, The archival status of the requested item(s).} +#' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{lastSuccessfulRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, +#' \item createdAt string, The time that the run was queued. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. +#' }} +#' \item{requiredResources}{list, A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' }} +#' \item{partitionLabel}{string, The partition label used to run this object. 
Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} +#' @export +scripts_put_custom <- function(id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, partition_label = NULL, running_as_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/custom/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list(name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, partitionLabel = partition_label, runningAsId = running_as_id) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Update some attributes of this Custom Script +#' @param id integer required. The ID for the script. +#' @param name string optional. The name of the script. +#' @param parent_id integer optional. The ID of the parent job that will trigger this script +#' @param arguments list optional. Parameter-value pairs to use when running this script. Only settable if this script has defined parameters. +#' @param remote_host_id integer optional. The remote host ID that this script will connect to. +#' @param credential_id integer optional. The credential that this script will use. +#' @param schedule list optional. A list containing the following elements: +#' \itemize{ +#' \item scheduled boolean, If the item is scheduled. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param notifications list optional. A list containing the following elements: #' \itemize{ @@ -22411,9 +25444,23 @@ scripts_get_custom <- function(id) { #' } #' @param time_zone string optional. The time zone of this script. #' @param target_project_id integer optional. Target project to which script outputs will be added. +#' @param required_resources list optional. A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' } +#' @param partition_label string optional. The partition label used to run this object. Only applicable for jobs using Docker. 
+#' @param running_as_id integer optional. The ID of the runner of this script. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the script.} #' \item{type}{string, The type of the script (e.g Custom)} #' \item{createdAt}{string, The time this script was created.} @@ -22461,10 +25508,11 @@ scripts_get_custom <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -22497,6 +25545,7 @@ scripts_get_custom <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -22509,160 +25558,22 @@ scripts_get_custom <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} -#' @export -scripts_put_custom <- function(id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, target_project_id = NULL) { - - args <- as.list(match.call())[-1] - path <- "/scripts/custom/{id}" - path_params <- list(id = id) - query_params <- list() - body_params <- list(name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, targetProjectId = target_project_id) - path_params <- path_params[match_params(path_params, args)] - query_params <- query_params[match_params(query_params, args)] - body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) - - return(resp) - - } - - -#' Update some attributes of this Custom Script -#' @param id integer required. The ID for the script. -#' @param name string optional. The name of the script. -#' @param parent_id integer optional. The ID of the parent job that will trigger this script -#' @param arguments list optional. Parameter-value pairs to use when running this script. 
Only settable if this script has defined parameters. -#' @param remote_host_id integer optional. The remote host ID that this script will connect to. -#' @param credential_id integer optional. The credential that this script will use. -#' @param schedule list optional. A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' } -#' @param notifications list optional. A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' } -#' @param time_zone string optional. The time zone of this script. -#' @param target_project_id integer optional. Target project to which script outputs will be added. -#' -#' @return A list containing the following elements: -#' \item{id}{integer, The ID for the script.} -#' \item{name}{string, The name of the script.} -#' \item{type}{string, The type of the script (e.g Custom)} -#' \item{createdAt}{string, The time this script was created.} -#' \item{updatedAt}{string, The time the script was last updated.} -#' \item{author}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. -#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{state}{string, The status of the script's last run.} -#' \item{finishedAt}{string, The time that the script's last run finished.} -#' \item{category}{string, } -#' \item{projects}{array, An array containing the following fields: -#' \itemize{ -#' \item id integer, The ID for the project. -#' \item name string, The name of the project. -#' }} -#' \item{parentId}{integer, The ID of the parent job that will trigger this script} -#' \item{params}{array, An array containing the following fields: -#' \itemize{ -#' \item name string, The variable's name as used within your code. -#' \item label string, The label to present to users when asking them for the value. -#' \item description string, A short sentence or fragment describing this parameter to the end user. -#' \item type string, The type of parameter. 
Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom -#' \item required boolean, Whether this param is required. -#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. -#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. -#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` -#' }} -#' \item{arguments}{list, Parameter-value pairs to use when running this script. Only settable if this script has defined parameters.} -#' \item{isTemplate}{boolean, Whether others scripts use this one as a template.} -#' \item{publishedAsTemplateId}{integer, The ID of the template that this script is backing.} -#' \item{fromTemplateId}{integer, The ID of the template script.} -#' \item{uiReportUrl}{integer, The url of the custom HTML.} -#' \item{uiReportId}{integer, The id of the report with the custom HTML.} -#' \item{uiReportProvideAPIKey}{boolean, Whether the ui report requests an API Key from the report viewer.} -#' \item{templateScriptName}{string, The name of the template script.} -#' \item{templateNote}{string, The template's note.} -#' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} -#' \item{credentialId}{integer, The credential that this script will use.} -#' \item{codePreview}{string, The code that this script will run with arguments inserted.} -#' \item{schedule}{list, A list containing the following elements: -#' \itemize{ -#' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. -#' \item scheduledHours array, Hours of the day it is scheduled on. -#' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. -#' }} -#' \item{notifications}{list, A list containing the following elements: -#' \itemize{ -#' \item urls array, URLs to receive a POST request at job completion -#' \item successEmailSubject string, Custom subject line for success e-mail. -#' \item successEmailBody string, Custom body text for success e-mail, written in Markdown. -#' \item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully. -#' \item successEmailFromName string, Name from which success emails are sent; defaults to "Civis." -#' \item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job. -#' \item failureEmailAddresses array, Addresses to notify by e-mail when the job fails. -#' \item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes. -#' \item successOn boolean, If success email notifications are on. -#' \item failureOn boolean, If failure email notifications are on. -#' }} -#' \item{runningAs}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, The ID of this user. -#' \item name string, This user's name. -#' \item username string, This user's username. 
-#' \item initials string, This user's initials. -#' \item online boolean, Whether this user is online. -#' }} -#' \item{timeZone}{string, The time zone of this script.} -#' \item{lastRun}{list, A list containing the following elements: -#' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. -#' }} -#' \item{hidden}{boolean, The hidden status of the item.} -#' \item{archived}{string, The archival status of the requested item(s).} -#' \item{targetProjectId}{integer, Target project to which script outputs will be added.} -#' \item{lastSuccessfulRun}{list, A list containing the following elements: +#' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item id integer, -#' \item state string, -#' \item createdAt string, The time that the run was queued. -#' \item startedAt string, The time that the run started. -#' \item finishedAt string, The time that the run completed. -#' \item error string, The error message for this run, if present. +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} +#' \item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export -scripts_patch_custom <- function(id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, target_project_id = NULL) { +scripts_patch_custom <- function(id, name = NULL, parent_id = NULL, arguments = NULL, remote_host_id = NULL, credential_id = NULL, schedule = NULL, notifications = NULL, time_zone = NULL, target_project_id = NULL, required_resources = NULL, partition_label = NULL, running_as_id = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/custom/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, targetProjectId = target_project_id) + body_params <- list(name = name, parentId = parent_id, arguments = arguments, remoteHostId = remote_host_id, credentialId = credential_id, schedule = schedule, notifications = notifications, timeZone = time_zone, targetProjectId = target_project_id, requiredResources = required_resources, partitionLabel = partition_label, runningAsId = running_as_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -22699,19 +25610,21 @@ scripts_delete_custom <- function(id) { #' @param id integer required. The ID of the sql. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this run.} -#' \item{sqlId}{integer, The ID of this sql.} -#' \item{state}{string, The state of this run.} +#' \item{id}{integer, The ID of the run.} +#' \item{sqlId}{integer, The ID of the sql.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started.} -#' \item{finishedAt}{string, The time that this run finished.} -#' \item{error}{string, The error message for this run, if present.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' \item{output}{array, An array containing the following fields: #' \itemize{ #' \item outputName string, The name of the output file. #' \item fileId integer, The unique ID of the output file. #' \item path string, The temporary link to download this output file, valid for 36 hours. #' }} +#' \item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} #' @export scripts_post_sql_runs <- function(id) { @@ -22738,19 +25651,21 @@ scripts_post_sql_runs <- function(id) { #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. #' #' @return An array containing the following fields: -#' \item{id}{integer, The ID of this run.} -#' \item{sqlId}{integer, The ID of this sql.} -#' \item{state}{string, The state of this run.} +#' \item{id}{integer, The ID of the run.} +#' \item{sqlId}{integer, The ID of the sql.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started.} -#' \item{finishedAt}{string, The time that this run finished.} -#' \item{error}{string, The error message for this run, if present.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' \item{output}{array, An array containing the following fields: #' \itemize{ #' \item outputName string, The name of the output file. #' \item fileId integer, The unique ID of the output file. #' \item path string, The temporary link to download this output file, valid for 36 hours. #' }} +#' \item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} #' @export scripts_list_sql_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -22774,19 +25689,21 @@ scripts_list_sql_runs <- function(id, limit = NULL, page_num = NULL, order = NUL #' @param run_id integer required. The ID of the run. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of this run.} -#' \item{sqlId}{integer, The ID of this sql.} -#' \item{state}{string, The state of this run.} +#' \item{id}{integer, The ID of the run.} +#' \item{sqlId}{integer, The ID of the sql.} +#' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started.} -#' \item{finishedAt}{string, The time that this run finished.} -#' \item{error}{string, The error message for this run, if present.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} +#' \item{error}{string, The error, if any, returned by the run.} #' \item{output}{array, An array containing the following fields: #' \itemize{ #' \item outputName string, The name of the output file. #' \item fileId integer, The unique ID of the output file. #' \item path string, The temporary link to download this output file, valid for 36 hours. #' }} +#' \item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} #' @export scripts_get_sql_runs <- function(id, run_id) { @@ -22828,6 +25745,30 @@ scripts_delete_sql_runs <- function(id, run_id) { } +#' Update the given run +#' @param id integer required. ID of the Job +#' @param run_id integer required. ID of the Run +#' @param error string optional. The error message to update +#' +#' @return An empty HTTP response +#' @export +scripts_patch_sql_runs <- function(id, run_id, error = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list(error = error) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the logs for a run #' @param id integer required. The ID of the sql. #' @param run_id integer required. The ID of the run. 
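The regenerated client keeps the existing calling convention: path parameters are required function arguments, optional body fields default to NULL, and responses come back as named lists. A minimal sketch of how the additions above might be exercised, assuming an authenticated civis session and the usual list-of-lists return from list endpoints; the IDs, the partition label, and the error text below are placeholders, not values from this patch:

    library(civis)

    # Hypothetical SQL script ID; list its runs and inspect the new fields.
    sql_id <- 123
    runs <- scripts_list_sql_runs(sql_id, limit = 1)
    latest <- runs[[1]]
    latest$createdAt        # when the run was queued
    latest$outputCachedOn   # set when a cached export was reused

    # Annotate that run through the new PATCH endpoint (error is its only body field).
    scripts_patch_sql_runs(sql_id, latest$id,
                           error = "placeholder: marked failed after manual review")

    # New custom-script fields: container resources, partition label, and runner.
    scripts_post_custom(from_template_id = 456,
                        name = "example custom script",
                        required_resources = list(cpu = 1000, memory = 2048, diskSpace = 5),
                        partition_label = "default",
                        running_as_id = 789)

The run-level PATCH endpoints (scripts_patch_sql_runs, scripts_patch_python3_runs, scripts_patch_r_runs, and the javascript equivalent) all take only an optional error string and return an empty HTTP response.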
@@ -22865,9 +25806,12 @@ scripts_list_sql_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) #' \item{containerId}{integer, The ID of the container.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_post_containers_runs <- function(id) { @@ -22898,9 +25842,12 @@ scripts_post_containers_runs <- function(id) { #' \item{containerId}{integer, The ID of the container.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_list_containers_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -22928,9 +25875,12 @@ scripts_list_containers_runs <- function(id, limit = NULL, page_num = NULL, orde #' \item{containerId}{integer, The ID of the container.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_get_containers_runs <- function(id, run_id) { @@ -22980,9 +25930,12 @@ scripts_delete_containers_runs <- function(id, run_id) { #' \item{pythonId}{integer, The ID of the python.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' 
\item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_post_python3_runs <- function(id) { @@ -23013,9 +25966,12 @@ scripts_post_python3_runs <- function(id) { #' \item{pythonId}{integer, The ID of the python.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_list_python3_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -23043,9 +25999,12 @@ scripts_list_python3_runs <- function(id, limit = NULL, page_num = NULL, order = #' \item{pythonId}{integer, The ID of the python.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_get_python3_runs <- function(id, run_id) { @@ -23087,6 +26046,30 @@ scripts_delete_python3_runs <- function(id, run_id) { } +#' Update the given run +#' @param id integer required. ID of the Job +#' @param run_id integer required. ID of the Run +#' @param error string optional. The error message to update +#' +#' @return An empty HTTP response +#' @export +scripts_patch_python3_runs <- function(id, run_id, error = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list(error = error) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the logs for a run #' @param id integer required. The ID of the python. 
#' @param run_id integer required. The ID of the run. @@ -23124,9 +26107,12 @@ scripts_list_python3_runs_logs <- function(id, run_id, last_id = NULL, limit = N #' \item{rId}{integer, The ID of the r.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_post_r_runs <- function(id) { @@ -23157,9 +26143,12 @@ scripts_post_r_runs <- function(id) { #' \item{rId}{integer, The ID of the r.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_list_r_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -23187,9 +26176,12 @@ scripts_list_r_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, #' \item{rId}{integer, The ID of the r.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} #' @export scripts_get_r_runs <- function(id, run_id) { @@ -23231,6 +26223,30 @@ scripts_delete_r_runs <- function(id, run_id) { } +#' Update the given run +#' @param id integer required. ID of the Job +#' @param run_id integer required. ID of the Run +#' @param error string optional. 
The error message to update +#' +#' @return An empty HTTP response +#' @export +scripts_patch_r_runs <- function(id, run_id, error = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list(error = error) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the logs for a run #' @param id integer required. The ID of the r. #' @param run_id integer required. The ID of the run. @@ -23268,8 +26284,9 @@ scripts_list_r_runs_logs <- function(id, run_id, last_id = NULL, limit = NULL) { #' \item{javascriptId}{integer, The ID of the javascript.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export scripts_post_javascript_runs <- function(id) { @@ -23301,8 +26318,9 @@ scripts_post_javascript_runs <- function(id) { #' \item{javascriptId}{integer, The ID of the javascript.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export scripts_list_javascript_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -23331,8 +26349,9 @@ scripts_list_javascript_runs <- function(id, limit = NULL, page_num = NULL, orde #' \item{javascriptId}{integer, The ID of the javascript.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} #' @export scripts_get_javascript_runs <- function(id, run_id) { @@ -23375,6 +26394,30 @@ scripts_delete_javascript_runs <- function(id, run_id) { } +#' Update the given run +#' @param id integer required. ID of the Job +#' @param run_id integer required. ID of the Run +#' @param error string optional. 
The error message to update +#' +#' @return An empty HTTP response +#' @export +scripts_patch_javascript_runs <- function(id, run_id, error = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list(error = error) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the logs for a run #' @param id integer required. The ID of the javascript. #' @param run_id integer required. The ID of the run. @@ -23412,9 +26455,12 @@ scripts_list_javascript_runs_logs <- function(id, run_id, last_id = NULL, limit #' \item{customId}{integer, The ID of the custom.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. Only available if the backing script is a Python, R, or container script.} #' @export scripts_post_custom_runs <- function(id) { @@ -23445,9 +26491,12 @@ scripts_post_custom_runs <- function(id) { #' \item{customId}{integer, The ID of the custom.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. 
Only available if the backing script is a Python, R, or container script.} #' @export scripts_list_custom_runs <- function(id, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -23475,9 +26524,12 @@ scripts_list_custom_runs <- function(id, limit = NULL, page_num = NULL, order = #' \item{customId}{integer, The ID of the custom.} #' \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} #' \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -#' \item{startedAt}{string, The time the last run started at.} -#' \item{finishedAt}{string, The time the last run completed.} +#' \item{createdAt}{string, The time the run was created.} +#' \item{startedAt}{string, The time the run started at.} +#' \item{finishedAt}{string, The time the run completed.} #' \item{error}{string, The error, if any, returned by the run.} +#' \item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +#' \item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. Only available if the backing script is a Python, R, or container script.} #' @export scripts_get_custom_runs <- function(id, run_id) { @@ -23890,11 +26942,35 @@ scripts_post_custom_runs_outputs <- function(id, run_id, object_type, object_id) } +#' Update the given run +#' @param id integer required. ID of the Job +#' @param run_id integer required. ID of the Run +#' @param error string optional. The error message to update +#' +#' @return An empty HTTP response +#' @export +scripts_patch_container_runs <- function(id, run_id, error = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/container/{id}/runs/{run_id}" + path_params <- list(id = id, run_id = run_id) + query_params <- list() + body_params <- list(error = error) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the git metadata attached to an item #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -23904,7 +26980,8 @@ scripts_post_custom_runs_outputs <- function(id, run_id, object_type, object_id) #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} #' @export scripts_list_sql_git <- function(id) { @@ -23929,10 +27006,11 @@ scripts_list_sql_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -23942,15 +27020,16 @@ scripts_list_sql_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -scripts_put_sql_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +scripts_put_sql_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/sql/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -23961,7 +27040,47 @@ scripts_put_sql_git <- function(id, git_ref = NULL, git_branch = NULL, git_path } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +scripts_patch_sql_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: @@ -24016,7 +27135,7 @@ scripts_post_sql_git_commits <- function(id, content, message, file_hash) { } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -24043,11 +27162,63 @@ scripts_get_sql_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_sql_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. 
+#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_sql_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the git metadata attached to an item #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24057,7 +27228,8 @@ scripts_get_sql_git_commits <- function(id, commit_hash) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export scripts_list_javascript_git <- function(id) { @@ -24082,10 +27254,11 @@ scripts_list_javascript_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24095,15 +27268,16 @@ scripts_list_javascript_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -scripts_put_javascript_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +scripts_put_javascript_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/javascript/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -24114,7 +27288,47 @@ scripts_put_javascript_git <- function(id, git_ref = NULL, git_branch = NULL, gi } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +scripts_patch_javascript_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. 
#' #' @return A list containing the following elements: @@ -24169,7 +27383,7 @@ scripts_post_javascript_git_commits <- function(id, content, message, file_hash) } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -24196,11 +27410,63 @@ scripts_get_javascript_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_javascript_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_javascript_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the git metadata attached to an item #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24210,7 +27476,8 @@ scripts_get_javascript_git_commits <- function(id, commit_hash) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} #' @export scripts_list_python3_git <- function(id) { @@ -24235,10 +27502,11 @@ scripts_list_python3_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24248,15 +27516,16 @@ scripts_list_python3_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -scripts_put_python3_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +scripts_put_python3_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/python3/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -24267,7 +27536,47 @@ scripts_put_python3_git <- function(id, git_ref = NULL, git_branch = NULL, git_p } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +scripts_patch_python3_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: @@ -24322,7 +27631,7 @@ scripts_post_python3_git_commits <- function(id, content, message, file_hash) { } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -24349,11 +27658,63 @@ scripts_get_python3_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_python3_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. 
+#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_python3_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Get the git metadata attached to an item #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24363,7 +27724,8 @@ scripts_get_python3_git_commits <- function(id, commit_hash) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export scripts_list_r_git <- function(id) { @@ -24388,10 +27750,11 @@ scripts_list_r_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -24401,15 +27764,16 @@ scripts_list_r_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -scripts_put_r_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +scripts_put_r_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/scripts/r/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -24420,7 +27784,47 @@ scripts_put_r_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +scripts_patch_r_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. 
#' #' @return A list containing the following elements: @@ -24475,7 +27879,7 @@ scripts_post_r_git_commits <- function(id, content, message, file_hash) { } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -24502,6 +27906,58 @@ scripts_get_r_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_r_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +scripts_post_r_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List users and groups permissioned on this object #' @param id integer required. The ID of the resource that is shared. #' @@ -24673,6 +28129,70 @@ scripts_delete_sql_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. 
ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +scripts_list_sql_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +scripts_put_sql_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/sql/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a SQL script belongs to #' @param id integer required. The ID of the SQL script. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -24818,10 +28338,11 @@ scripts_delete_sql_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -24855,6 +28376,7 @@ scripts_delete_sql_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -24873,6 +28395,7 @@ scripts_delete_sql_projects <- function(id, project_id) { #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. #' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_sql_archive <- function(id, status) { @@ -25062,6 +28585,70 @@ scripts_delete_containers_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +scripts_list_containers_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/containers/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +scripts_put_containers_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/containers/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Container Script belongs to #' @param id integer required. The ID of the Container Script. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -25160,6 +28747,12 @@ scripts_delete_containers_projects <- function(id, project_id) { #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -25207,10 +28800,11 @@ scripts_delete_containers_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -25235,8 +28829,8 @@ scripts_delete_containers_projects <- function(id, project_id) { #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). 
+#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -25258,9 +28852,12 @@ scripts_delete_containers_projects <- function(id, project_id) { #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_containers_archive <- function(id, status) { @@ -25450,6 +29047,70 @@ scripts_delete_python3_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +scripts_list_python3_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +scripts_put_python3_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/python3/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Python Script belongs to #' @param id integer required. The ID of the Python Script. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -25595,10 +29256,11 @@ scripts_delete_python3_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -25632,6 +29294,7 @@ scripts_delete_python3_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -25645,6 +29308,8 @@ scripts_delete_python3_projects <- function(id, project_id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. 
Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_python3_archive <- function(id, status) { @@ -25834,6 +29499,70 @@ scripts_delete_r_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +scripts_list_r_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +scripts_put_r_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/r/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects an R Script belongs to #' @param id integer required. The ID of the R Script. 
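# A hedged sketch of the dependency-listing endpoint for R scripts defined above.
# The IDs are hypothetical, and each element of the response is assumed to parse
# to a named list with the documented fields (objectType, fcoType, id, name, ...).
deps <- scripts_list_r_dependencies(id = 321, user_id = 654)
# Count dependent objects whose names are hidden because the requesting user
# cannot read them (name is nil in that case, per the documentation above).
unreadable <- Filter(function(d) is.null(d$name), deps)
length(unreadable)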
#' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -25979,10 +29708,11 @@ scripts_delete_r_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -26016,6 +29746,7 @@ scripts_delete_r_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -26029,6 +29760,8 @@ scripts_delete_r_projects <- function(id, project_id) { #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_r_archive <- function(id, status) { @@ -26218,6 +29951,70 @@ scripts_delete_javascript_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. 
ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +scripts_list_javascript_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +scripts_put_javascript_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/javascript/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a JavaScript Script belongs to #' @param id integer required. The ID of the JavaScript Script. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -26363,10 +30160,11 @@ scripts_delete_javascript_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -26400,12 +30198,14 @@ scripts_delete_javascript_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_javascript_archive <- function(id, status) { @@ -26489,13 +30289,79 @@ scripts_list_custom_shares <- function(id) { #' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} #' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} #' @export -scripts_put_custom_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +scripts_put_custom_shares_users <- function(id, user_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/scripts/custom/{id}/shares/users" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Revoke the permissions a user has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. The ID of the user. 
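# An illustrative call to the custom-script sharing endpoint shown above
# (the script and user IDs are hypothetical): grant two users write access
# without sending notification e-mails.
scripts_put_custom_shares_users(
  id = 987,                       # hypothetical custom script ID
  user_ids = c(111, 222),         # hypothetical user IDs
  permission_level = "write",
  send_shared_email = FALSE
)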
+#' +#' @return An empty HTTP response +#' @export +scripts_delete_custom_shares_users <- function(id, user_id) { + + args <- as.list(match.call())[-1] + path <- "/scripts/custom/{id}/shares/users/{user_id}" + path_params <- list(id = id, user_id = user_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Set the permissions groups has on this object +#' @param id integer required. The ID of the resource that is shared. +#' @param group_ids array required. An array of one or more group IDs. +#' @param permission_level string required. Options are: "read", "write", or "manage". +#' @param share_email_body string optional. Custom body text for e-mail sent on a share. +#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' +#' @return A list containing the following elements: +#' \item{readers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{writers}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{owners}{list, A list containing the following elements: +#' \itemize{ +#' \item users array, +#' \item groups array, +#' }} +#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +#' @export +scripts_put_custom_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { args <- as.list(match.call())[-1] - path <- "/scripts/custom/{id}/shares/users" + path <- "/scripts/custom/{id}/shares/groups" path_params <- list(id = id) query_params <- list() - body_params <- list(userIds = user_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -26506,17 +30372,17 @@ scripts_put_custom_shares_users <- function(id, user_ids, permission_level, shar } -#' Revoke the permissions a user has on this object +#' Revoke the permissions a group has on this object #' @param id integer required. The ID of the resource that is shared. -#' @param user_id integer required. The ID of the user. +#' @param group_id integer required. The ID of the group. 
#' #' @return An empty HTTP response #' @export -scripts_delete_custom_shares_users <- function(id, user_id) { +scripts_delete_custom_shares_groups <- function(id, group_id) { args <- as.list(match.call())[-1] - path <- "/scripts/custom/{id}/shares/users/{user_id}" - path_params <- list(id = id, user_id = user_id) + path <- "/scripts/custom/{id}/shares/groups/{group_id}" + path_params <- list(id = id, group_id = group_id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -26529,66 +30395,64 @@ scripts_delete_custom_shares_users <- function(id, user_id) { } -#' Set the permissions groups has on this object +#' List dependent objects for this object #' @param id integer required. The ID of the resource that is shared. -#' @param group_ids array required. An array of one or more group IDs. -#' @param permission_level string required. Options are: "read", "write", or "manage". -#' @param share_email_body string optional. Custom body text for e-mail sent on a share. -#' @param send_shared_email boolean optional. Send email to the recipients of a share. +#' @param user_id integer optional. ID of target user #' -#' @return A list containing the following elements: -#' \item{readers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{writers}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{owners}{list, A list containing the following elements: -#' \itemize{ -#' \item users array, -#' \item groups array, -#' }} -#' \item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} -#' \item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} #' @export -scripts_put_custom_shares_groups <- function(id, group_ids, permission_level, share_email_body = NULL, send_shared_email = NULL) { +scripts_list_custom_dependencies <- function(id, user_id = NULL) { args <- as.list(match.call())[-1] - path <- "/scripts/custom/{id}/shares/groups" + path <- "/scripts/custom/{id}/dependencies" path_params <- list(id = id) - query_params <- list() - body_params <- list(groupIds = group_ids, permissionLevel = permission_level, shareEmailBody = share_email_body, sendSharedEmail = send_shared_email) + query_params <- list(user_id = user_id) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("PUT", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Revoke the permissions a group has on this object +#' Transfer ownership of this object to another user #' @param id integer required. The ID of the resource that is shared. -#' @param group_id integer required. The ID of the group. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
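# A hedged end-to-end sketch for custom scripts (all IDs hypothetical): first list
# the dependencies as seen by a target user, then transfer ownership while granting
# manage permission on those dependencies via the endpoints defined above and below.
target_user <- 456                                          # hypothetical user ID
deps <- scripts_list_custom_dependencies(id = 987, user_id = target_user)
scripts_put_custom_transfer(
  id = 987,                                                 # hypothetical custom script ID
  user_id = target_user,
  include_dependencies = TRUE,
  email_body = "Transferring this script as part of offboarding.",
  send_email = TRUE
)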
#' -#' @return An empty HTTP response +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} #' @export -scripts_delete_custom_shares_groups <- function(id, group_id) { +scripts_put_custom_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { args <- as.list(match.call())[-1] - path <- "/scripts/custom/{id}/shares/groups/{group_id}" - path_params <- list(id = id, group_id = group_id) + path <- "/scripts/custom/{id}/transfer" + path_params <- list(id = id) query_params <- list() - body_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("DELETE", path, path_params, query_params, body_params) + resp <- call_api("PUT", path, path_params, query_params, body_params) return(resp) @@ -26693,6 +30557,12 @@ scripts_delete_custom_projects <- function(id, project_id) { #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the script.} #' \item{type}{string, The type of the script (e.g Custom)} #' \item{createdAt}{string, The time this script was created.} @@ -26740,10 +30610,11 @@ scripts_delete_custom_projects <- function(id, project_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -26776,6 +30647,7 @@ scripts_delete_custom_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -26788,6 +30660,14 @@ scripts_delete_custom_projects <- function(id, project_id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{requiredResources}{list, A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' }} +#' \item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_put_custom_archive <- function(id, status) { @@ -26861,10 +30741,11 @@ scripts_put_custom_archive <- function(id, status) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -26898,6 +30779,7 @@ scripts_put_custom_archive <- function(id, status) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -26916,6 +30798,7 @@ scripts_put_custom_archive <- function(id, status) { #' \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null #' \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. 
#' }} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_sql_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -26989,10 +30872,11 @@ scripts_post_sql_clone <- function(id, clone_schedule = NULL, clone_triggers = N #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -27026,12 +30910,14 @@ scripts_post_sql_clone <- function(id, clone_schedule = NULL, clone_triggers = N #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{source}{string, The body/text of the script.} #' \item{remoteHostId}{integer, The remote host ID that this script will connect to.} #' \item{credentialId}{integer, The credential that this script will use.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_javascript_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -27105,10 +30991,11 @@ scripts_post_javascript_clone <- function(id, clone_schedule = NULL, clone_trigg #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -27142,6 +31029,7 @@ scripts_post_javascript_clone <- function(id, clone_schedule = NULL, clone_trigg #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -27155,6 +31043,8 @@ scripts_post_javascript_clone <- function(id, clone_schedule = NULL, clone_trigg #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_python3_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -27228,10 +31118,11 @@ scripts_post_python3_clone <- function(id, clone_schedule = NULL, clone_triggers #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -27265,6 +31156,7 @@ scripts_post_python3_clone <- function(id, clone_schedule = NULL, clone_triggers #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} #' \item{archived}{string, The archival status of the requested item(s).} @@ -27278,6 +31170,8 @@ scripts_post_python3_clone <- function(id, clone_schedule = NULL, clone_triggers #' \item{source}{string, The body/text of the script.} #' \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +#' \item{partitionLabel}{string, The partition label used to run this object. 
} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_r_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -27304,6 +31198,12 @@ scripts_post_r_clone <- function(id, clone_schedule = NULL, clone_triggers = NUL #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the script.} #' \item{type}{string, The type of the script (e.g Custom)} #' \item{createdAt}{string, The time this script was created.} @@ -27351,10 +31251,11 @@ scripts_post_r_clone <- function(id, clone_schedule = NULL, clone_triggers = NUL #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -27387,6 +31288,7 @@ scripts_post_r_clone <- function(id, clone_schedule = NULL, clone_triggers = NUL #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -27399,6 +31301,14 @@ scripts_post_r_clone <- function(id, clone_schedule = NULL, clone_triggers = NUL #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{requiredResources}{list, A list containing the following elements: +#' \itemize{ +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +#' }} +#' \item{partitionLabel}{string, The partition label used to run this object. 
Only applicable for jobs using Docker.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_custom_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -27425,6 +31335,12 @@ scripts_post_custom_clone <- function(id, clone_schedule = NULL, clone_triggers #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for the script.} +#' \item{fromTemplateAliases}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The id of the Alias object. +#' \item objectId integer, The id of the object +#' \item alias string, The alias of the object +#' }} #' \item{name}{string, The name of the container.} #' \item{type}{string, The type of the script (e.g Container)} #' \item{createdAt}{string, The time this script was created.} @@ -27472,10 +31388,11 @@ scripts_post_custom_clone <- function(id, clone_schedule = NULL, clone_triggers #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ @@ -27500,8 +31417,8 @@ scripts_post_custom_clone <- function(id, clone_schedule = NULL, clone_triggers #' }} #' \item{requiredResources}{list, A list containing the following elements: #' \itemize{ -#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -#' \item memory integer, The amount of RAM to allocate for the container (in MB). +#' \item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +#' \item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. #' \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. #' }} #' \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -27523,9 +31440,12 @@ scripts_post_custom_clone <- function(id, clone_schedule = NULL, clone_triggers #' \item error string, The error message for this run, if present. #' }} #' \item{timeZone}{string, The time zone of this script.} +#' \item{partitionLabel}{string, The partition label used to run this object. } +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{hidden}{boolean, The hidden status of the item.} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{targetProjectId}{integer, Target project to which script outputs will be added.} +#' \item{runningAsId}{integer, The ID of the runner of this script.} #' @export scripts_post_containers_clone <- function(id, clone_schedule = NULL, clone_triggers = NULL, clone_notifications = NULL) { @@ -27573,6 +31493,7 @@ scripts_post_containers_clone <- function(id, clone_schedule = NULL, clone_trigg #' \item lastRunFinish string, The last run finish time of the item, if the item is a job. #' \item public boolean, The flag that indicates a template is available to all users. #' \item lastRunException string, The exception of the item after the last run, if the item is a job. +#' \item autoShare boolean, The flag that indicates if a project has Auto-Share enabled. #' }} #' @export search_list <- function(query = NULL, type = NULL, offset = NULL, order = NULL, owner = NULL, limit = NULL, archived = NULL, last_run_state = NULL) { @@ -27614,10 +31535,59 @@ search_list_types <- function() { } +#' Search queries that are not hidden +#' @param search_string string optional. Space delimited search terms for searching queries by their SQL. Supports wild card characters "?" for any single character, and "*" for zero or more characters. +#' @param database_id integer optional. The database ID. +#' @param credential_id integer optional. The credential ID. +#' @param author_id integer optional. The author of the query. +#' @param archived boolean optional. The archival status of the requested item(s). Defaults to false. +#' @param state array optional. The state of the last run. One or more of queued, running, succeeded, failed, and cancelled. +#' @param started_before string optional. An upper bound for the start date of the last run. +#' @param started_after string optional. A lower bound for the start date of the last run. +#' @param limit integer optional. Number of results to return. Defaults to 10. Maximum allowed is 50. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to last_run_started_at. Must be one of: last_run_started_at. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The query ID.} +#' \item{database}{integer, The database ID.} +#' \item{credential}{integer, The credential ID.} +#' \item{sql}{string, The SQL executed by the query.} +#' \item{authorId}{integer, The author of the query.} +#' \item{archived}{boolean, The archival status of the requested item(s).} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' \item{lastRun}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, +#' \item state string, The state of the run. One of queued, running, succeeded, failed, and cancelled. +#' \item startedAt string, The time that the run started. +#' \item finishedAt string, The time that the run completed. +#' \item error string, The error message for this run, if present. 
+#' }} +#' @export +search_list_queries <- function(search_string = NULL, database_id = NULL, credential_id = NULL, author_id = NULL, archived = NULL, state = NULL, started_before = NULL, started_after = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/search/queries" + path_params <- list() + query_params <- list(search_string = search_string, database_id = database_id, credential_id = credential_id, author_id = author_id, archived = archived, state = state, started_before = started_before, started_after = started_after, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List Services #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). -#' @param author string optional. If specified, return imports from this author. It accepts a comma-separated list of author IDs. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param status string optional. If specified, returns Services with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'idle'. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. @@ -27651,31 +31621,14 @@ search_list_types <- function() { #' \item dockerImageName string, The name of the docker image to pull from DockerHub. #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item instanceType string, The EC2 instance type requested for the deployment. 
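# A hedged sketch of the new query-search endpoint defined above. The database ID
# and search string are hypothetical; the limit and ordering values follow the
# documented defaults and bounds.
hits <- search_list_queries(
  search_string = "schema.table_*",   # "*" and "?" wildcards, per the docs above
  database_id = 32,                   # hypothetical database ID
  state = "succeeded",
  limit = 25,                         # documented maximum is 50
  order_dir = "desc"
)
length(hits)                          # number of matching queries returned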
-#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{archived}{string, The archival status of the requested item(s).} @@ -27714,7 +31667,6 @@ services_list <- function(hidden = NULL, archived = NULL, author = NULL, status #' @param memory integer optional. The amount of memory allocated to each replica of the Service. #' @param cpu integer optional. The amount of cpu allocated to each replica of the the Service. #' @param credentials array optional. A list of credential IDs to pass to the Service. -#' @param api_key_id integer optional. API key id of user #' @param permission_set_id integer optional. The ID of the associated permission set, if any. #' @param git_repo_url string optional. The url for the git repo where the Service code lives. #' @param git_repo_ref string optional. The git reference to use when pulling code from the repo. @@ -27725,6 +31677,7 @@ services_list <- function(hidden = NULL, archived = NULL, author = NULL, status #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' } +#' @param partition_label string optional. The partition label used to run this object. #' @param hidden boolean optional. The hidden status of the item. #' #' @return A list containing the following elements: @@ -27756,7 +31709,6 @@ services_list <- function(hidden = NULL, archived = NULL, author = NULL, status #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -27772,52 +31724,35 @@ services_list <- function(hidden = NULL, archived = NULL, author = NULL, status #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. 
-#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export -services_post <- function(name = NULL, description = NULL, type = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL, hidden = NULL) { +services_post <- function(name = NULL, description = NULL, type = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL, partition_label = NULL, hidden = NULL) { args <- as.list(match.call())[-1] path <- "/services/" path_params <- list() query_params <- list() - body_params <- list(name = name, description = description, type = type, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, apiKeyId = api_key_id, permissionSetId = permission_set_id, gitRepoUrl = git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications, hidden = hidden) + body_params <- list(name = name, description = description, type = type, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, permissionSetId = permission_set_id, gitRepoUrl = 
git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications, partitionLabel = partition_label, hidden = hidden) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -27860,7 +31795,6 @@ services_post <- function(name = NULL, description = NULL, type = NULL, docker_i #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -27876,42 +31810,25 @@ services_post <- function(name = NULL, description = NULL, type = NULL, docker_i #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. 
#' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export @@ -27949,7 +31866,6 @@ services_get <- function(id) { #' @param memory integer optional. The amount of memory allocated to each replica of the Service. #' @param cpu integer optional. The amount of cpu allocated to each replica of the the Service. #' @param credentials array optional. A list of credential IDs to pass to the Service. -#' @param api_key_id integer optional. API key id of user #' @param permission_set_id integer optional. The ID of the associated permission set, if any. #' @param git_repo_url string optional. The url for the git repo where the Service code lives. #' @param git_repo_ref string optional. The git reference to use when pulling code from the repo. @@ -27960,6 +31876,7 @@ services_get <- function(id) { #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' } +#' @param partition_label string optional. The partition label used to run this object. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for this Service.} @@ -27990,7 +31907,6 @@ services_get <- function(id) { #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -28006,52 +31922,35 @@ services_get <- function(id) { #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. 
-#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export -services_put <- function(id, name = NULL, description = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL) { +services_put <- function(id, name = NULL, description = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL, partition_label = NULL) { args <- as.list(match.call())[-1] path <- "/services/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, description = description, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, apiKeyId = api_key_id, permissionSetId = permission_set_id, gitRepoUrl = git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications) + body_params <- list(name = name, description = description, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, permissionSetId = permission_set_id, gitRepoUrl = git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications, partitionLabel = partition_label) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params 
<- body_params[match_params(body_params, args)] @@ -28079,7 +31978,6 @@ services_put <- function(id, name = NULL, description = NULL, docker_image_name #' @param memory integer optional. The amount of memory allocated to each replica of the Service. #' @param cpu integer optional. The amount of cpu allocated to each replica of the the Service. #' @param credentials array optional. A list of credential IDs to pass to the Service. -#' @param api_key_id integer optional. API key id of user #' @param permission_set_id integer optional. The ID of the associated permission set, if any. #' @param git_repo_url string optional. The url for the git repo where the Service code lives. #' @param git_repo_ref string optional. The git reference to use when pulling code from the repo. @@ -28090,6 +31988,7 @@ services_put <- function(id, name = NULL, description = NULL, docker_image_name #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' } +#' @param partition_label string optional. The partition label used to run this object. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for this Service.} @@ -28120,7 +32019,6 @@ services_put <- function(id, name = NULL, description = NULL, docker_image_name #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -28136,52 +32034,35 @@ services_put <- function(id, name = NULL, description = NULL, docker_image_name #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. 
#' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export -services_patch <- function(id, name = NULL, description = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL) { +services_patch <- function(id, name = NULL, description = NULL, docker_image_name = NULL, docker_image_tag = NULL, schedule = NULL, replicas = NULL, max_replicas = NULL, instance_type = NULL, memory = NULL, cpu = NULL, credentials = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL, partition_label = NULL) { args <- as.list(match.call())[-1] path <- "/services/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, description = description, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, apiKeyId = api_key_id, permissionSetId = permission_set_id, gitRepoUrl = git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications) + body_params <- list(name = name, description = description, dockerImageName = docker_image_name, dockerImageTag = docker_image_tag, schedule = schedule, replicas = replicas, maxReplicas = max_replicas, instanceType = instance_type, memory = memory, cpu = cpu, credentials = credentials, permissionSetId = permission_set_id, gitRepoUrl = git_repo_url, gitRepoRef = git_repo_ref, gitPathDir = git_path_dir, environmentVariables = environment_variables, notifications = notifications, partitionLabel = partition_label) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -28385,6 +32266,70 @@ services_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object 
+#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +services_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/services/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +services_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/services/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Update the archive status of this object #' @param id integer required. The ID of the object. #' @param status boolean required. The desired archived status of the object. 
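The dependency and transfer endpoints added above (`services_list_dependencies()` and `services_put_transfer()`) mirror the share helpers elsewhere in the client. A minimal usage sketch, assuming a hypothetical service ID and target user ID, and assuming the parsed response carries the camelCase fields documented above:

library(civis)

service_id <- 123   # hypothetical service ID
new_owner  <- 456   # hypothetical target user ID

# Preview what the target user would gain access to before transferring.
deps <- services_list_dependencies(service_id, user_id = new_owner)
sapply(deps, function(d) d$fcoType)

# Transfer ownership, granting manage permission on all dependencies
# and notifying the new owner by e-mail.
services_put_transfer(service_id, user_id = new_owner,
                      include_dependencies = TRUE, send_email = TRUE)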
@@ -28418,7 +32363,6 @@ services_delete_shares_groups <- function(id, group_id) { #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -28434,42 +32378,25 @@ services_delete_shares_groups <- function(id, group_id) { #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. -#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export @@ -28598,13 +32525,14 @@ services_delete_projects <- function(id, project_id) { #' \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} #' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} #' \item{state}{string, The state of the deployment.} #' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } #' \item{serviceId}{integer, The ID of owning Service} #' @export services_list_deployments <- function(service_id, deployment_id = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -28627,7 +32555,6 @@ services_list_deployments <- function(service_id, deployment_id = NULL, limit = #' Deploy a Service #' @param service_id integer required. The ID of the owning Service #' @param deployment_id integer optional. The ID for this deployment -#' @param published boolean optional. 
#' #' @return A list containing the following elements: #' \item{deploymentId}{integer, The ID for this deployment.} @@ -28638,22 +32565,23 @@ services_list_deployments <- function(service_id, deployment_id = NULL, limit = #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} #' \item{displayUrl}{string, A signed URL for viewing the deployed item.} #' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} #' \item{state}{string, The state of the deployment.} #' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } #' \item{serviceId}{integer, The ID of owning Service} #' @export -services_post_deployments <- function(service_id, deployment_id = NULL, published = NULL) { +services_post_deployments <- function(service_id, deployment_id = NULL) { args <- as.list(match.call())[-1] path <- "/services/{service_id}/deployments" path_params <- list(service_id = service_id) query_params <- list() - body_params <- list(deploymentId = deployment_id, published = published) + body_params <- list(deploymentId = deployment_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -28677,13 +32605,14 @@ services_post_deployments <- function(service_id, deployment_id = NULL, publishe #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} #' \item{displayUrl}{string, A signed URL for viewing the deployed item.} #' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} #' \item{state}{string, The state of the deployment.} #' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } #' \item{serviceId}{integer, The ID of owning Service} #' @export services_get_deployments <- function(service_id, deployment_id) { @@ -28729,7 +32658,6 @@ services_delete_deployments <- function(service_id, deployment_id) { #' Redeploy a Service #' @param service_id integer required. The ID of the owning Service #' @param deployment_id integer optional. The ID for this deployment -#' @param published boolean optional. 
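With the `published` flag removed, deploying or redeploying a service now takes only the owning service ID and an optional deployment ID. A minimal sketch with a hypothetical service ID, assuming the parsed response uses the camelCase fields documented above; memory and cpu are reported in MB and millicores, and maxMemoryUsage/maxCpuUsage appear once a deployment has finished:

library(civis)

service_id <- 123  # hypothetical service ID

# Deploy the service; no `published` argument is sent any more.
dep <- services_post_deployments(service_id)
dep$state                                    # current deployment state
c(memory_mb = dep$memory, cpu_millicores = dep$cpu)

# Peak usage of finished deployments shows up when listing them later.
services_list_deployments(service_id, limit = 5)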
#' #' @return A list containing the following elements: #' \item{deploymentId}{integer, The ID for this deployment.} @@ -28740,22 +32668,23 @@ services_delete_deployments <- function(service_id, deployment_id) { #' \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} #' \item{displayUrl}{string, A signed URL for viewing the deployed item.} #' \item{instanceType}{string, The EC2 instance type requested for the deployment.} -#' \item{memory}{integer, The memory allocated to the deployment.} -#' \item{cpu}{integer, The cpu allocated to the deployment.} +#' \item{memory}{integer, The memory allocated to the deployment, in MB.} +#' \item{cpu}{integer, The cpu allocated to the deployment, in millicores.} #' \item{state}{string, The state of the deployment.} #' \item{stateMessage}{string, A detailed description of the state.} +#' \item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +#' \item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } -#' \item{published}{boolean, } #' \item{serviceId}{integer, The ID of owning Service} #' @export -services_post_redeploy <- function(service_id, deployment_id = NULL, published = NULL) { +services_post_redeploy <- function(service_id, deployment_id = NULL) { args <- as.list(match.call())[-1] path <- "/services/{service_id}/redeploy" path_params <- list(service_id = service_id) query_params <- list() - body_params <- list(deploymentId = deployment_id, published = published) + body_params <- list(deploymentId = deployment_id) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -28828,7 +32757,6 @@ services_list_deployments_logs <- function(id, deployment_id, start_at = NULL, e #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' \item{credentials}{array, A list of credential IDs to pass to the Service.} -#' \item{apiKeyId}{integer, API key id of user} #' \item{permissionSetId}{integer, The ID of the associated permission set, if any.} #' \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} #' \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -28844,42 +32772,25 @@ services_list_deployments_logs <- function(id, deployment_id, start_at = NULL, e #' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). #' \item displayUrl string, A signed URL for viewing the deployed item. #' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. -#' \item state string, The state of the deployment. -#' \item stateMessage string, A detailed description of the state. -#' \item createdAt string, -#' \item updatedAt string, -#' \item published boolean, -#' \item serviceId integer, The ID of owning Service -#' }} -#' \item{previewDeployment}{list, A list containing the following elements: -#' \itemize{ -#' \item deploymentId integer, The ID for this deployment. -#' \item userId integer, The ID of the owner. -#' \item host string, Domain of the deployment. -#' \item name string, Name of the deployment. 
-#' \item dockerImageName string, The name of the docker image to pull from DockerHub. -#' \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -#' \item displayUrl string, A signed URL for viewing the deployed item. -#' \item instanceType string, The EC2 instance type requested for the deployment. -#' \item memory integer, The memory allocated to the deployment. -#' \item cpu integer, The cpu allocated to the deployment. +#' \item memory integer, The memory allocated to the deployment, in MB. +#' \item cpu integer, The cpu allocated to the deployment, in millicores. #' \item state string, The state of the deployment. #' \item stateMessage string, A detailed description of the state. +#' \item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +#' \item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. #' \item createdAt string, #' \item updatedAt string, -#' \item published boolean, #' \item serviceId integer, The ID of owning Service #' }} #' \item{currentUrl}{string, The URL that the service is hosted at.} -#' \item{previewUrl}{string, The URL that previews of the service are hosted at.} #' \item{environmentVariables}{list, Environment Variables to be passed into the Service.} #' \item{notifications}{list, A list containing the following elements: #' \itemize{ #' \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. #' \item failureOn boolean, If failure email notifications are on #' }} +#' \item{partitionLabel}{string, The partition label used to run this object.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} #' @export @@ -28904,6 +32815,7 @@ services_post_clone <- function(id) { #' @param id integer required. The ID of the service. #' @param name string required. The name of the token. #' @param machine_token boolean optional. If true, create a compact token with no user information. +#' @param expires_in integer optional. The number of seconds until the token should expire #' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the token.} @@ -28917,16 +32829,17 @@ services_post_clone <- function(id) { #' \item online boolean, Whether this user is online. #' }} #' \item{machineToken}{boolean, If true, this token is not tied to a particular user.} +#' \item{expiresAt}{string, The date and time when the token expires.} #' \item{createdAt}{string, The date and time when the token was created.} #' \item{token}{string, The value of the token. 
Only returned when the token is first created.} #' @export -services_post_tokens <- function(id, name, machine_token = NULL) { +services_post_tokens <- function(id, name, machine_token = NULL, expires_in = NULL) { args <- as.list(match.call())[-1] path <- "/services/{id}/tokens" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, machineToken = machine_token) + body_params <- list(name = name, machineToken = machine_token, expiresIn = expires_in) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -28952,6 +32865,7 @@ services_post_tokens <- function(id, name, machine_token = NULL) { #' \item online boolean, Whether this user is online. #' }} #' \item{machineToken}{boolean, If true, this token is not tied to a particular user.} +#' \item{expiresAt}{string, The date and time when the token expires.} #' \item{createdAt}{string, The date and time when the token was created.} #' @export services_list_tokens <- function(id) { @@ -29396,6 +33310,199 @@ storage_hosts_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +storage_hosts_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/storage_hosts/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
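`services_post_tokens()` now accepts `expires_in` (seconds until the token expires) and reports the resulting `expiresAt` timestamp. A minimal sketch with a hypothetical service ID; note that the token's secret value is only returned by this initial call:

library(civis)

service_id <- 123  # hypothetical service ID

# Create a machine token that expires in 24 hours.
tok <- services_post_tokens(service_id, name = "ci-token",
                            machine_token = TRUE,
                            expires_in = 24 * 60 * 60)
tok$token      # secret value, only present in this response
tok$expiresAt  # expiry timestamp

# Later listings return token metadata (including expiresAt) but not the secret.
services_list_tokens(service_id)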
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +storage_hosts_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/storage_hosts/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' List Table Tags +#' @param name string optional. Name of the tag. If it is provided, the results will be filtered by name +#' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. +#' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. +#' @param order string optional. The field on which to order the result set. Defaults to name. Must be one of: name, user, table_count. +#' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. +#' +#' @return An array containing the following fields: +#' \item{id}{integer, Table Tag ID} +#' \item{name}{string, Table Tag Name} +#' \item{tableCount}{integer, The total number of tables associated with the tag.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' @export +table_tags_list <- function(name = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { + + args <- as.list(match.call())[-1] + path <- "/table_tags/" + path_params <- list() + query_params <- list(name = name, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Create a Table Tag +#' @param name string required. 
Table Tag Name +#' +#' @return A list containing the following elements: +#' \item{id}{integer, Table Tag ID} +#' \item{name}{string, Table Tag Name} +#' \item{createdAt}{string, The date the tag was created.} +#' \item{updatedAt}{string, The date the tag was recently updated on.} +#' \item{tableCount}{integer, The total number of tables associated with the tag.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' @export +table_tags_post <- function(name) { + + args <- as.list(match.call())[-1] + path <- "/table_tags/" + path_params <- list() + query_params <- list() + body_params <- list(name = name) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get a Table Tag +#' @param id integer required. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, Table Tag ID} +#' \item{name}{string, Table Tag Name} +#' \item{createdAt}{string, The date the tag was created.} +#' \item{updatedAt}{string, The date the tag was recently updated on.} +#' \item{tableCount}{integer, The total number of tables associated with the tag.} +#' \item{user}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' @export +table_tags_get <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/table_tags/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Delete a Table Tag +#' @param id integer required. +#' +#' @return An empty HTTP response +#' @export +table_tags_delete <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/table_tags/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Geocode a table #' @param source_table_id integer required. The ID of the table to be enhanced. #' @@ -29428,6 +33535,7 @@ tables_post_enhancements_geocodings <- function(source_table_id) { #' @param perform_ncoa boolean optional. Whether to update addresses for records matching the National Change of Address (NCOA) database. #' @param ncoa_credential_id integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true. #' @param output_level string optional. 
The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned. +#' @param chunk_size integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID of the enhancement.} @@ -29438,14 +33546,15 @@ tables_post_enhancements_geocodings <- function(source_table_id) { #' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} #' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} #' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} #' @export -tables_post_enhancements_cass_ncoa <- function(source_table_id, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL) { +tables_post_enhancements_cass_ncoa <- function(source_table_id, perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, chunk_size = NULL) { args <- as.list(match.call())[-1] path <- "/tables/{source_table_id}/enhancements/cass-ncoa" path_params <- list(source_table_id = source_table_id) query_params <- list() - body_params <- list(performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level) + body_params <- list(performNcoa = perform_ncoa, ncoaCredentialId = ncoa_credential_id, outputLevel = output_level, chunkSize = chunk_size) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -29497,6 +33606,7 @@ tables_get_enhancements_geocodings <- function(id, source_table_id) { #' \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} #' \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} #' \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +#' \item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} #' @export tables_get_enhancements_cass_ncoa <- function(id, source_table_id) { @@ -29574,6 +33684,11 @@ tables_post_scan <- function(database_id, schema, table_name, stats_priority = N #' }} #' \item{primaryKeys}{array, The primary keys for this table.} #' \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.} +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name +#' }} #' \item{ontologyMapping}{list, The ontology-key to column-name mapping. 
See /ontology for the list of valid ontology keys.} #' \item{columns}{array, An array containing the following fields: #' \itemize{ @@ -29586,8 +33701,8 @@ tables_post_scan <- function(database_id, schema, table_name, stats_priority = N #' \item order integer, Relative position of the column in the table. #' \item minValue string, Smallest value in the column. #' \item maxValue string, Largest value in the column. -#' \item avgValue number, Average value of the column, where applicable. -#' \item stddev number, Stddev of the column, where applicable. +#' \item avgValue number, This parameter is deprecated. +#' \item stddev number, This parameter is deprecated. #' \item valueDistributionPercent object, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values. #' \item coverageCount integer, Number of non-null values in the column. #' \item nullCount integer, Number of null values in the column. @@ -29595,7 +33710,7 @@ tables_post_scan <- function(database_id, schema, table_name, stats_priority = N #' \item useableAsIndependentVariable boolean, Whether the column may be used as an independent variable to train a model. #' \item useableAsPrimaryKey boolean, Whether the column may be used as an primary key to identify table rows. #' \item valueDistribution object, An object mapping distinct values in the column to the number of times they appear in the column -#' \item distinctCount integer, Number of distinct values in the column. +#' \item distinctCount integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value. #' }} #' \item{joins}{array, An array containing the following fields: #' \itemize{ @@ -29650,9 +33765,11 @@ tables_post_refresh <- function(id) { #' @param schema string optional. If specified, will be used to filter the tables returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "schema=\%census\%" will return both "client_census.table" and "census_2010.table"). #' @param name string optional. If specified, will be used to filter the tables returned. Substring matching is supported with "\%" and "*" wildcards (e.g., "name=\%table\%" will return both "table1" and "my table"). #' @param search string optional. If specified, will be used to filter the tables returned. Will search across schema and name (in the full form schema.name) and will return any full name containing the search string. +#' @param table_tag_ids array optional. If specified, will be used to filter the tables returned. Will search across Table Tags and will return any tables that have one of the matching Table Tags. +#' @param credential_id integer optional. If specified, will be used instead of the default credential to filter the tables returned. #' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. -#' @param order string optional. The field on which to order the result set. Defaults to schema. Must be one of: schema, name, search. +#' @param order string optional. The field on which to order the result set. Defaults to schema. Must be one of: schema, name, search, table_tag_ids, credential_id. #' @param order_dir string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc. 
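The CASS/NCOA enhancement gains a `chunk_size` argument (noted above as not available to all users). A minimal sketch with hypothetical table and credential IDs:

library(civis)

source_table_id <- 123  # hypothetical table ID
ncoa_cred       <- 456  # hypothetical credential ID

# Run a CASS/NCOA enhancement, processing up to 50,000 records per chunk.
enh <- tables_post_enhancements_cass_ncoa(
  source_table_id,
  perform_ncoa = TRUE,
  ncoa_credential_id = ncoa_cred,
  output_level = "coalesced",
  chunk_size = 50000
)
enh$id  # ID of the enhancement job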
#' #' @return An array containing the following fields: @@ -29680,13 +33797,18 @@ tables_post_refresh <- function(id) { #' \item finishedAt string, The time that the run completed. #' \item error string, The error message for this run, if present. #' }} +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name +#' }} #' @export -tables_list <- function(database_id = NULL, schema = NULL, name = NULL, search = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +tables_list <- function(database_id = NULL, schema = NULL, name = NULL, search = NULL, table_tag_ids = NULL, credential_id = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/tables/" path_params <- list() - query_params <- list(database_id = database_id, schema = schema, name = name, search = search, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(database_id = database_id, schema = schema, name = name, search = search, table_tag_ids = table_tag_ids, credential_id = credential_id, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -29730,6 +33852,11 @@ tables_list <- function(database_id = NULL, schema = NULL, name = NULL, search = #' }} #' \item{primaryKeys}{array, The primary keys for this table.} #' \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.} +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name +#' }} #' \item{ontologyMapping}{list, The ontology-key to column-name mapping. See /ontology for the list of valid ontology keys.} #' \item{columns}{array, An array containing the following fields: #' \itemize{ @@ -29742,8 +33869,8 @@ tables_list <- function(database_id = NULL, schema = NULL, name = NULL, search = #' \item order integer, Relative position of the column in the table. #' \item minValue string, Smallest value in the column. #' \item maxValue string, Largest value in the column. -#' \item avgValue number, Average value of the column, where applicable. -#' \item stddev number, Stddev of the column, where applicable. +#' \item avgValue number, This parameter is deprecated. +#' \item stddev number, This parameter is deprecated. #' \item valueDistributionPercent object, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values. #' \item coverageCount integer, Number of non-null values in the column. #' \item nullCount integer, Number of null values in the column. @@ -29751,7 +33878,7 @@ tables_list <- function(database_id = NULL, schema = NULL, name = NULL, search = #' \item useableAsIndependentVariable boolean, Whether the column may be used as an independent variable to train a model. #' \item useableAsPrimaryKey boolean, Whether the column may be used as an primary key to identify table rows. #' \item valueDistribution object, An object mapping distinct values in the column to the number of times they appear in the column -#' \item distinctCount integer, Number of distinct values in the column. 
+#' \item distinctCount integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value. #' }} #' \item{joins}{array, An array containing the following fields: #' \itemize{ @@ -29837,6 +33964,11 @@ tables_get <- function(id) { #' }} #' \item{primaryKeys}{array, The primary keys for this table.} #' \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.} +#' \item{tableTags}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, Table Tag ID +#' \item name string, Table Tag Name +#' }} #' \item{ontologyMapping}{list, The ontology-key to column-name mapping. See /ontology for the list of valid ontology keys.} #' @export tables_patch <- function(id, ontology_mapping = NULL, description = NULL, primary_keys = NULL, last_modified_keys = NULL) { @@ -29874,8 +34006,8 @@ tables_patch <- function(id, ontology_mapping = NULL, description = NULL, primar #' \item{order}{integer, Relative position of the column in the table.} #' \item{minValue}{string, Smallest value in the column.} #' \item{maxValue}{string, Largest value in the column.} -#' \item{avgValue}{number, Average value of the column, where applicable.} -#' \item{stddev}{number, Stddev of the column, where applicable.} +#' \item{avgValue}{number, This parameter is deprecated.} +#' \item{stddev}{number, This parameter is deprecated.} #' \item{valueDistributionPercent}{list, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values.} #' \item{coverageCount}{integer, Number of non-null values in the column.} #' \item{nullCount}{integer, Number of null values in the column.} @@ -29883,7 +34015,7 @@ tables_patch <- function(id, ontology_mapping = NULL, description = NULL, primar #' \item{useableAsIndependentVariable}{boolean, Whether the column may be used as an independent variable to train a model.} #' \item{useableAsPrimaryKey}{boolean, Whether the column may be used as an primary key to identify table rows.} #' \item{valueDistribution}{list, An object mapping distinct values in the column to the number of times they appear in the column} -#' \item{distinctCount}{integer, Number of distinct values in the column.} +#' \item{distinctCount}{integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value.} #' @export tables_list_columns <- function(id, name = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { @@ -29902,6 +34034,54 @@ tables_list_columns <- function(id, name = NULL, limit = NULL, page_num = NULL, } +#' Add a tag to a table +#' @param id integer required. The ID of the table. +#' @param table_tag_id integer required. The ID of the tag. 
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the table.} +#' \item{tableTagId}{integer, The ID of the tag.} +#' @export +tables_put_tags <- function(id, table_tag_id) { + + args <- as.list(match.call())[-1] + path <- "/tables/{id}/tags/{table_tag_id}" + path_params <- list(id = id, table_tag_id = table_tag_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Add a tag to a table +#' @param id integer required. The ID of the table. +#' @param table_tag_id integer required. The ID of the tag. +#' +#' @return An empty HTTP response +#' @export +tables_delete_tags <- function(id, table_tag_id) { + + args <- as.list(match.call())[-1] + path <- "/tables/{id}/tags/{table_tag_id}" + path_params <- list(id = id, table_tag_id = table_tag_id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Table belongs to #' @param id integer required. The ID of the Table. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -30165,8 +34345,73 @@ templates_delete_reports_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +templates_list_reports_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/templates/reports/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
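# Editorial usage sketch: tagging and untagging a table with the new endpoints
# defined above. The table and tag IDs are hypothetical placeholders; the DELETE
# call simply reverses the PUT.
tables_put_tags(id = 123, table_tag_id = 45)
tables_delete_tags(id = 123, table_tag_id = 45)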
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +templates_put_reports_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/templates/reports/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List Report Templates #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param category string optional. A category to filter results by, one of: dataset-viz #' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. @@ -30181,6 +34426,7 @@ templates_delete_reports_shares_groups <- function(id, group_id) { #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30189,14 +34435,13 @@ templates_delete_reports_shares_groups <- function(id, group_id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. 
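# Editorial usage sketch: transferring ownership of a report template. The
# template and user IDs are hypothetical. Listing dependencies first shows what
# the target user will be granted manage permissions on when
# include_dependencies = TRUE.
deps <- templates_list_reports_dependencies(id = 1234, user_id = 99)
templates_put_reports_transfer(id = 1234, user_id = 99,
                               include_dependencies = TRUE,
                               send_email = FALSE)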
#' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' @export -templates_list_reports <- function(hidden = NULL, category = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +templates_list_reports <- function(hidden = NULL, author = NULL, category = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/templates/reports/" path_params <- list() - query_params <- list(hidden = hidden, category = category, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(hidden = hidden, author = author, category = category, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -30224,6 +34469,7 @@ templates_list_reports <- function(hidden = NULL, category = NULL, limit = NULL, #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30232,7 +34478,6 @@ templates_list_reports <- function(hidden = NULL, category = NULL, limit = NULL, #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{authCodeUrl}{string, A URL to the template's stored code body.} #' \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} #' \item{hidden}{boolean, The hidden status of the item.} @@ -30265,6 +34510,7 @@ templates_post_reports <- function(name, code_body, category = NULL, archived = #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30273,7 +34519,6 @@ templates_post_reports <- function(name, code_body, category = NULL, archived = #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. 
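# Editorial usage sketch: the new author filter on templates_list_reports()
# accepts a comma-separated list of user IDs; the IDs below are placeholders.
templates_list_reports(author = "1001,1002", hidden = FALSE, limit = 50)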
#' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{authCodeUrl}{string, A URL to the template's stored code body.} #' \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} #' \item{hidden}{boolean, The hidden status of the item.} @@ -30311,6 +34556,7 @@ templates_get_reports <- function(id) { #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30319,7 +34565,6 @@ templates_get_reports <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{authCodeUrl}{string, A URL to the template's stored code body.} #' \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} #' \item{hidden}{boolean, The hidden status of the item.} @@ -30357,6 +34602,7 @@ templates_put_reports <- function(id, name, code_body, category = NULL, archived #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30365,7 +34611,6 @@ templates_put_reports <- function(id, name, code_body, category = NULL, archived #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. #' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{authCodeUrl}{string, A URL to the template's stored code body.} #' \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} #' \item{hidden}{boolean, The hidden status of the item.} @@ -30421,6 +34666,7 @@ templates_delete_reports <- function(id) { #' \item{updatedAt}{string, } #' \item{useCount}{integer, The number of uses of this template.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{author}{list, A list containing the following elements: #' \itemize{ #' \item id integer, The ID of this user. @@ -30429,7 +34675,6 @@ templates_delete_reports <- function(id) { #' \item initials string, This user's initials. #' \item online boolean, Whether this user is online. 
#' }} -#' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{authCodeUrl}{string, A URL to the template's stored code body.} #' \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} #' \item{hidden}{boolean, The hidden status of the item.} @@ -30622,6 +34867,70 @@ templates_delete_scripts_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +templates_list_scripts_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/templates/scripts/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? 
+#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +templates_put_scripts_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/templates/scripts/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List the projects a Script Template belongs to #' @param id integer required. The ID of the Script Template. #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. @@ -30716,6 +35025,7 @@ templates_delete_scripts_projects <- function(id, project_id) { #' List Script Templates #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param category string optional. A category to filter results by, one of: import, export, enhancement, model, and script #' @param limit integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. @@ -30735,13 +35045,21 @@ templates_delete_scripts_projects <- function(id, project_id) { #' \item{uiReportId}{integer, The id of the report that this template uses.} #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. 
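# Editorial usage sketch: the script-template transfer mirrors the report-template
# endpoint above. The template and user IDs are hypothetical placeholders.
templates_list_scripts_dependencies(id = 5678)
templates_put_scripts_transfer(id = 5678, user_id = 99, include_dependencies = TRUE)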
+#' }} #' @export -templates_list_scripts <- function(hidden = NULL, category = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +templates_list_scripts <- function(hidden = NULL, author = NULL, category = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/templates/scripts/" path_params <- list() - query_params <- list(hidden = hidden, category = category, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(hidden = hidden, author = author, category = category, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -30767,6 +35085,17 @@ templates_list_scripts <- function(hidden = NULL, category = NULL, limit = NULL, #' \item{scriptId}{integer, The id of the script that this template uses.} #' \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} #' \item{userContext}{string, The user context of the script that this template uses.} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. +#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} #' \item{name}{string, The name of the template.} #' \item{category}{string, The category of this template.} #' \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -30777,6 +35106,15 @@ templates_list_scripts <- function(hidden = NULL, category = NULL, limit = NULL, #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
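# Editorial usage sketch: filtering script templates by author (a comma-separated
# list of user IDs); the values below are placeholders.
templates_list_scripts(author = "1001", category = "script", limit = 25)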
One of "read", "write", or "manage".} #' @export templates_post_scripts <- function(script_id, name, note = NULL, ui_report_id = NULL, archived = NULL, hidden = NULL) { @@ -30804,6 +35142,17 @@ templates_post_scripts <- function(script_id, name, note = NULL, ui_report_id = #' \item{scriptId}{integer, The id of the script that this template uses.} #' \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} #' \item{userContext}{string, The user context of the script that this template uses.} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. +#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} #' \item{name}{string, The name of the template.} #' \item{category}{string, The category of this template.} #' \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -30814,6 +35163,15 @@ templates_post_scripts <- function(script_id, name, note = NULL, ui_report_id = #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export templates_get_scripts <- function(id) { @@ -30845,6 +35203,17 @@ templates_get_scripts <- function(id) { #' \item{scriptId}{integer, The id of the script that this template uses.} #' \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} #' \item{userContext}{string, The user context of the script that this template uses.} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. 
+#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} #' \item{name}{string, The name of the template.} #' \item{category}{string, The category of this template.} #' \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -30855,6 +35224,15 @@ templates_get_scripts <- function(id) { #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export templates_put_scripts <- function(id, name, note = NULL, ui_report_id = NULL, archived = NULL) { @@ -30886,6 +35264,17 @@ templates_put_scripts <- function(id, name, note = NULL, ui_report_id = NULL, ar #' \item{scriptId}{integer, The id of the script that this template uses.} #' \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} #' \item{userContext}{string, The user context of the script that this template uses.} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. +#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. 
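# Editorial usage sketch: the newly documented params field can be inspected on a
# fetched template. The template ID is hypothetical, and the response is assumed
# to be parsed into a nested list as elsewhere in this client.
tmpl <- templates_get_scripts(id = 5678)
vapply(tmpl$params, function(p) p$name, character(1))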
+#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} #' \item{name}{string, The name of the template.} #' \item{category}{string, The category of this template.} #' \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -30896,6 +35285,15 @@ templates_put_scripts <- function(id, name, note = NULL, ui_report_id = NULL, ar #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export templates_patch_scripts <- function(id, name = NULL, note = NULL, ui_report_id = NULL, archived = NULL) { @@ -30946,6 +35344,17 @@ templates_delete_scripts <- function(id) { #' \item{scriptId}{integer, The id of the script that this template uses.} #' \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} #' \item{userContext}{string, The user context of the script that this template uses.} +#' \item{params}{array, An array containing the following fields: +#' \itemize{ +#' \item name string, The variable's name as used within your code. +#' \item label string, The label to present to users when asking them for the value. +#' \item description string, A short sentence or fragment describing this parameter to the end user. +#' \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +#' \item required boolean, Whether this param is required. +#' \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +#' \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +#' \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. 
Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +#' }} #' \item{name}{string, The name of the template.} #' \item{category}{string, The category of this template.} #' \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -30956,6 +35365,15 @@ templates_delete_scripts <- function(id) { #' \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} #' \item{archived}{boolean, Whether the template has been archived.} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{author}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of this user. +#' \item name string, This user's name. +#' \item username string, This user's username. +#' \item initials string, This user's initials. +#' \item online boolean, Whether this user is online. +#' }} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' @export templates_post_scripts_review <- function(id, status) { @@ -30976,10 +35394,11 @@ templates_post_scripts_review <- function(id, status) { #' List users #' @param feature_flag string optional. Return users that have a feature flag enabled. -#' @param account_status string optional. The account status by which to filter users. May be one of "active", "inactive", or "all". -#' @param query string optional. Return users who match the given query, based on name, user, and email. -#' @param group_id integer optional. The ID of the group by which to filter users. Cannot be present if organization_id is. -#' @param organization_id integer optional. The ID of the organization by which to filter users. Cannot be present if group_id is. +#' @param account_status string optional. The account status by which to filter users. May be one of "active", "inactive", or "all". Defaults to active. +#' @param query string optional. Return users who match the given query, based on name, user, email, and id. +#' @param group_id integer optional. The ID of the group by which to filter users. Cannot be present if group_ids is. +#' @param group_ids array optional. The IDs of the groups by which to filter users. Cannot be present if group_id is. +#' @param organization_id integer optional. The ID of the organization by which to filter users. #' @param exclude_groups boolean optional. Whether or to exclude users' groups. Default: false. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 10000. #' @param page_num integer optional. Page number of the results to return. Defaults to the first page, 1. @@ -30991,23 +35410,30 @@ templates_post_scripts_review <- function(id, status) { #' \item{user}{string, The username of this user.} #' \item{name}{string, The name of this user.} #' \item{email}{string, The email of this user.} -#' \item{active}{boolean, The account status of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} #' \item{primaryGroupId}{integer, The ID of the primary group of this user.} #' \item{groups}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. +#' \item slug string, The slug of this group. 
+#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{createdAt}{string, The date and time when the user was created.} #' \item{currentSignInAt}{string, The date and time when the user's current session began.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} #' @export -users_list <- function(feature_flag = NULL, account_status = NULL, query = NULL, group_id = NULL, organization_id = NULL, exclude_groups = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { +users_list <- function(feature_flag = NULL, account_status = NULL, query = NULL, group_id = NULL, group_ids = NULL, organization_id = NULL, exclude_groups = NULL, limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) { args <- as.list(match.call())[-1] path <- "/users/" path_params <- list() - query_params <- list(feature_flag = feature_flag, account_status = account_status, query = query, group_id = group_id, organization_id = organization_id, exclude_groups = exclude_groups, limit = limit, page_num = page_num, order = order, order_dir = order_dir) + query_params <- list(feature_flag = feature_flag, account_status = account_status, query = query, group_id = group_id, group_ids = group_ids, organization_id = organization_id, exclude_groups = exclude_groups, limit = limit, page_num = page_num, order = order, order_dir = order_dir) body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] @@ -31019,12 +35445,12 @@ users_list <- function(feature_flag = NULL, account_status = NULL, query = NULL, } -#' Create a new user (must be an admin or client user admin) +#' Create a new user (must be a team or org admin) #' @param name string required. The name of this user. #' @param email string required. The email of this user. #' @param primary_group_id integer required. The ID of the primary group of this user. #' @param user string required. The username of this user. -#' @param active boolean optional. The account status of this user. +#' @param active boolean optional. Whether this user account is active or deactivated. #' @param city string optional. The city of this user. #' @param state string optional. The state of this user. #' @param time_zone string optional. The time zone of this user. @@ -31045,13 +35471,15 @@ users_list <- function(feature_flag = NULL, account_status = NULL, query = NULL, #' \item{user}{string, The username of this user.} #' \item{name}{string, The name of this user.} #' \item{email}{string, The email of this user.} -#' \item{active}{boolean, The account status of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} #' \item{primaryGroupId}{integer, The ID of the primary group of this user.} #' \item{groups}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. 
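# Editorial usage sketch: users_list() now accepts group_ids, which cannot be
# combined with group_id. The group IDs are placeholders; per the updated
# documentation, account_status defaults to "active" when omitted.
users_list(group_ids = c(10, 20), account_status = "all", limit = 100)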
+#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{city}{string, The city of this user.} #' \item{state}{string, The state of this user.} @@ -31072,6 +35500,15 @@ users_list <- function(feature_flag = NULL, account_status = NULL, query = NULL, #' \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} #' \item{organizationLoginType}{string, The user's organization's login type.} #' \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' \item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +#' \item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} #' @export users_post <- function(name, email, primary_group_id, user, active = NULL, city = NULL, state = NULL, time_zone = NULL, initials = NULL, department = NULL, title = NULL, prefers_sms_otp = NULL, group_ids = NULL, vpn_enabled = NULL, sso_disabled = NULL, otp_required_for_login = NULL, exempt_from_org_sms_otp_disabled = NULL, robot = NULL, send_email = NULL) { @@ -31108,7 +35545,9 @@ users_post <- function(name, email, primary_group_id, user, active = NULL, city #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{organizationName}{string, The name of the organization the user belongs to.} #' \item{organizationSlug}{string, The slug of the organization the user belongs to.} @@ -31118,6 +35557,10 @@ users_post <- function(name, email, primary_group_id, user, active = NULL, city #' \item{assumingRole}{boolean, Whether the user is assuming a role or not.} #' \item{assumingAdmin}{boolean, Whether the user is assuming admin.} #' \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +#' \item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. The user is not in superadmin mode when set to null.} +#' \item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} #' @export users_list_me <- function() { @@ -31139,13 +35582,13 @@ users_list_me <- function() { #' Update info about the logged-in user #' @param preferences list optional. 
A list containing the following elements: #' \itemize{ -#' \item appIndexOrderField string, Order field for the apps index pages. -#' \item appIndexOrderDir string, Order direction for the apps index pages. -#' \item resultIndexOrderField string, Order field for the results index page. -#' \item resultIndexOrderDir string, Order direction for the results index page. -#' \item resultIndexTypeFilter string, Type filter for the results index page. -#' \item resultIndexAuthorFilter string, Author filter for the results index page. -#' \item resultIndexArchivedFilter string, Archived filter for the results index page. +#' \item appIndexOrderField string, This attribute is deprecated +#' \item appIndexOrderDir string, This attribute is deprecated +#' \item resultIndexOrderField string, Order field for the reports index page. +#' \item resultIndexOrderDir string, Order direction for the reports index page. +#' \item resultIndexTypeFilter string, Type filter for the reports index page. +#' \item resultIndexAuthorFilter string, Author filter for the reports index page. +#' \item resultIndexArchivedFilter string, Archived filter for the reports index page. #' \item importIndexOrderField string, Order field for the imports index page. #' \item importIndexOrderDir string, Order direction for the imports index page. #' \item importIndexTypeFilter string, Type filter for the imports index page. @@ -31210,6 +35653,9 @@ users_list_me <- function() { #' \item serviceOrderDir string, Order direction for the services page. #' \item serviceAuthorFilter string, Author filter for the services page. #' \item serviceArchivedFilter string, Archived filter for the services page. +#' \item assumeRoleHistory string, JSON string of previously assumed roles. +#' \item defaultSuccessNotificationsOn boolean, Whether email notifications for the success of all applicable jobs are on by default. +#' \item defaultFailureNotificationsOn boolean, Whether email notifications for the failure of all applicable jobs are on by default. #' } #' @param last_checked_announcements string optional. The date and time at which the user last checked their announcements. #' @@ -31229,7 +35675,9 @@ users_list_me <- function() { #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{organizationName}{string, The name of the organization the user belongs to.} #' \item{organizationSlug}{string, The slug of the organization the user belongs to.} @@ -31239,6 +35687,10 @@ users_list_me <- function() { #' \item{assumingRole}{boolean, Whether the user is assuming a role or not.} #' \item{assumingAdmin}{boolean, Whether the user is assuming admin.} #' \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +#' \item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. 
The user is not in superadmin mode when set to null.} +#' \item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} #' @export users_patch_me <- function(preferences = NULL, last_checked_announcements = NULL) { @@ -31267,8 +35719,8 @@ users_patch_me <- function(preferences = NULL, last_checked_announcements = NULL #' \itemize{ #' \item vendor boolean, This attribute is deprecated #' \item media boolean, True if user has access to the Media Optimizer job type. -#' \item mainApp string, The slug for the main app for an app-only user account. -#' \item appCount integer, Number of apps this user has access to. +#' \item mainApp string, This attribute is deprecated +#' \item appCount integer, This attribute is deprecated #' \item reportsOnly boolean, True if user is a reports-only user. #' \item reportsCreator boolean, True if this user is allowed to create HTML reports. #' }} @@ -31291,6 +35743,168 @@ users_list_me_ui <- function() { } +#' List themes +#' +#' @return An array containing the following fields: +#' \item{id}{integer, The ID of this theme.} +#' \item{name}{string, The name of this theme.} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +users_list_me_themes <- function() { + + args <- as.list(match.call())[-1] + path <- "/users/me/themes" + path_params <- list() + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Show a theme +#' @param id integer required. The ID of this theme. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this theme.} +#' \item{name}{string, The name of this theme.} +#' \item{organizationIds}{array, List of organization ID's allowed to use this theme.} +#' \item{settings}{string, The theme configuration object.} +#' \item{logoFile}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID of the logo image file. +#' \item downloadUrl string, The URL of the logo image file. 
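# Editorial usage sketch: setting the newly documented default-notification
# preferences for the logged-in user. Only preference keys listed in the
# documentation above are used; the values here are arbitrary.
users_patch_me(preferences = list(defaultSuccessNotificationsOn = FALSE,
                                  defaultFailureNotificationsOn = TRUE))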
+#' }} +#' \item{createdAt}{string, } +#' \item{updatedAt}{string, } +#' @export +users_get_me_themes <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/users/me/themes/{id}" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Enables Superadmin Mode for the current user +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this user.} +#' \item{name}{string, This user's name.} +#' \item{email}{string, This user's email address.} +#' \item{username}{string, This user's username.} +#' \item{initials}{string, This user's initials.} +#' \item{lastCheckedAnnouncements}{string, The date and time at which the user last checked their announcements.} +#' \item{featureFlags}{list, The feature flag settings for this user.} +#' \item{roles}{array, The roles this user has, listed by slug.} +#' \item{preferences}{list, This user's preferences.} +#' \item{customBranding}{string, The branding of Platform for this user.} +#' \item{primaryGroupId}{integer, The ID of the primary group of this user.} +#' \item{groups}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this group. +#' \item name string, The name of this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. +#' }} +#' \item{organizationName}{string, The name of the organization the user belongs to.} +#' \item{organizationSlug}{string, The slug of the organization the user belongs to.} +#' \item{organizationDefaultThemeId}{integer, The ID of the organizations's default theme.} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{signInCount}{integer, The number of times the user has signed in.} +#' \item{assumingRole}{boolean, Whether the user is assuming a role or not.} +#' \item{assumingAdmin}{boolean, Whether the user is assuming admin.} +#' \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +#' \item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. 
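# Editorial usage sketch: listing the themes available to the current user and
# fetching the first one by ID. Assumes at least one theme exists and that the
# response is parsed into a nested list as elsewhere in this client.
themes <- users_list_me_themes()
if (length(themes) > 0) users_get_me_themes(id = themes[[1]]$id)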
The user is not in superadmin mode when set to null.} +#' \item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' @export +users_post_me_superadmin <- function() { + + args <- as.list(match.call())[-1] + path <- "/users/me/superadmin" + path_params <- list() + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Disables Superadmin Mode for the current user +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this user.} +#' \item{name}{string, This user's name.} +#' \item{email}{string, This user's email address.} +#' \item{username}{string, This user's username.} +#' \item{initials}{string, This user's initials.} +#' \item{lastCheckedAnnouncements}{string, The date and time at which the user last checked their announcements.} +#' \item{featureFlags}{list, The feature flag settings for this user.} +#' \item{roles}{array, The roles this user has, listed by slug.} +#' \item{preferences}{list, This user's preferences.} +#' \item{customBranding}{string, The branding of Platform for this user.} +#' \item{primaryGroupId}{integer, The ID of the primary group of this user.} +#' \item{groups}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this group. +#' \item name string, The name of this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. +#' }} +#' \item{organizationName}{string, The name of the organization the user belongs to.} +#' \item{organizationSlug}{string, The slug of the organization the user belongs to.} +#' \item{organizationDefaultThemeId}{integer, The ID of the organizations's default theme.} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{signInCount}{integer, The number of times the user has signed in.} +#' \item{assumingRole}{boolean, Whether the user is assuming a role or not.} +#' \item{assumingAdmin}{boolean, Whether the user is assuming admin.} +#' \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +#' \item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. 
The user is not in superadmin mode when set to null.} +#' \item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' @export +users_delete_me_superadmin <- function() { + + args <- as.list(match.call())[-1] + path <- "/users/me/superadmin" + path_params <- list() + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Show info about a user #' @param id integer required. The ID of this user. #' @@ -31299,13 +35913,15 @@ users_list_me_ui <- function() { #' \item{user}{string, The username of this user.} #' \item{name}{string, The name of this user.} #' \item{email}{string, The email of this user.} -#' \item{active}{boolean, The account status of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} #' \item{primaryGroupId}{integer, The ID of the primary group of this user.} #' \item{groups}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{city}{string, The city of this user.} #' \item{state}{string, The state of this user.} @@ -31326,6 +35942,15 @@ users_list_me_ui <- function() { #' \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} #' \item{organizationLoginType}{string, The user's organization's login type.} #' \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' \item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +#' \item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} #' @export users_get <- function(id) { @@ -31344,11 +35969,11 @@ users_get <- function(id) { } -#' Update info about a user (must be an admin or client user admin) +#' Update info about a user (must be a team or org admin) #' @param id integer required. The ID of this user. #' @param name string optional. The name of this user. #' @param email string optional. The email of this user. -#' @param active boolean optional. The account status of this user. +#' @param active boolean optional. 
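# Editorial usage sketch: the two superadmin endpoints are symmetric; enabling and
# then disabling superadmin mode for the current user (requires the corresponding
# platform permissions).
users_post_me_superadmin()
users_delete_me_superadmin()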
Whether this user account is active or deactivated. #' @param primary_group_id integer optional. The ID of the primary group of this user. #' @param city string optional. The city of this user. #' @param state string optional. The state of this user. @@ -31365,19 +35990,22 @@ users_get <- function(id) { #' @param robot boolean optional. Whether the user is a robot. #' @param phone string optional. The phone number of this user. #' @param password string optional. The password of this user. +#' @param account_status string optional. Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended" #' #' @return A list containing the following elements: #' \item{id}{integer, The ID of this user.} #' \item{user}{string, The username of this user.} #' \item{name}{string, The name of this user.} #' \item{email}{string, The email of this user.} -#' \item{active}{boolean, The account status of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} #' \item{primaryGroupId}{integer, The ID of the primary group of this user.} #' \item{groups}{array, An array containing the following fields: #' \itemize{ #' \item id integer, The ID of this group. #' \item name string, The name of this group. -#' \item organizationId integer, The organization associated with this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} #' \item{city}{string, The city of this user.} #' \item{state}{string, The state of this user.} @@ -31398,14 +36026,23 @@ users_get <- function(id) { #' \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} #' \item{organizationLoginType}{string, The user's organization's login type.} #' \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' \item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +#' \item{accountStatus}{string, Account status of this user. 
One of: "Active", "Deactivated", "Suspended", "Unsuspended"} #' @export -users_patch <- function(id, name = NULL, email = NULL, active = NULL, primary_group_id = NULL, city = NULL, state = NULL, time_zone = NULL, initials = NULL, department = NULL, title = NULL, prefers_sms_otp = NULL, group_ids = NULL, vpn_enabled = NULL, sso_disabled = NULL, otp_required_for_login = NULL, exempt_from_org_sms_otp_disabled = NULL, robot = NULL, phone = NULL, password = NULL) { +users_patch <- function(id, name = NULL, email = NULL, active = NULL, primary_group_id = NULL, city = NULL, state = NULL, time_zone = NULL, initials = NULL, department = NULL, title = NULL, prefers_sms_otp = NULL, group_ids = NULL, vpn_enabled = NULL, sso_disabled = NULL, otp_required_for_login = NULL, exempt_from_org_sms_otp_disabled = NULL, robot = NULL, phone = NULL, password = NULL, account_status = NULL) { args <- as.list(match.call())[-1] path <- "/users/{id}" path_params <- list(id = id) query_params <- list() - body_params <- list(name = name, email = email, active = active, primaryGroupId = primary_group_id, city = city, state = state, timeZone = time_zone, initials = initials, department = department, title = title, prefersSmsOtp = prefers_sms_otp, groupIds = group_ids, vpnEnabled = vpn_enabled, ssoDisabled = sso_disabled, otpRequiredForLogin = otp_required_for_login, exemptFromOrgSmsOtpDisabled = exempt_from_org_sms_otp_disabled, robot = robot, phone = phone, password = password) + body_params <- list(name = name, email = email, active = active, primaryGroupId = primary_group_id, city = city, state = state, timeZone = time_zone, initials = initials, department = department, title = title, prefersSmsOtp = prefers_sms_otp, groupIds = group_ids, vpnEnabled = vpn_enabled, ssoDisabled = sso_disabled, otpRequiredForLogin = otp_required_for_login, exemptFromOrgSmsOtpDisabled = exempt_from_org_sms_otp_disabled, robot = robot, phone = phone, password = password, accountStatus = account_status) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -31491,26 +36128,70 @@ users_list_api_keys <- function(id, limit = NULL, page_num = NULL, order = NULL, #' \item patchAllowed boolean, Whether the constraint allows PATCH requests. #' \item deleteAllowed boolean, Whether the constraint allows DELETE requests. #' }} -#' \item{token}{string, The API key.} +#' \item{token}{string, The API key.} +#' @export +users_post_api_keys <- function(id, expires_in, name, constraints = NULL) { + + args <- as.list(match.call())[-1] + path <- "/users/{id}/api_keys" + path_params <- list(id = id) + query_params <- list() + body_params <- list(expiresIn = expires_in, name = name, constraints = constraints) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Show the specified API key +#' @param id string required. The ID of the user or 'me'. +#' @param key_id integer required. The ID of the API key. 
+#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of the API key.} +#' \item{name}{string, The name of the API key.} +#' \item{expiresAt}{string, The date and time when the key expired.} +#' \item{createdAt}{string, The date and time when the key was created.} +#' \item{revokedAt}{string, The date and time when the key was revoked.} +#' \item{lastUsedAt}{string, The date and time when the key was last used.} +#' \item{scopes}{array, The scopes which the key is permissioned on.} +#' \item{useCount}{integer, The number of times the key has been used.} +#' \item{expired}{boolean, True if the key has expired.} +#' \item{active}{boolean, True if the key has neither expired nor been revoked.} +#' \item{constraints}{array, An array containing the following fields: +#' \itemize{ +#' \item constraint string, The path matcher of the constraint. +#' \item constraintType string, The type of constraint (exact/prefix/regex/verb). +#' \item getAllowed boolean, Whether the constraint allows GET requests. +#' \item headAllowed boolean, Whether the constraint allows HEAD requests. +#' \item postAllowed boolean, Whether the constraint allows POST requests. +#' \item putAllowed boolean, Whether the constraint allows PUT requests. +#' \item patchAllowed boolean, Whether the constraint allows PATCH requests. +#' \item deleteAllowed boolean, Whether the constraint allows DELETE requests. +#' }} #' @export -users_post_api_keys <- function(id, expires_in, name, constraints = NULL) { +users_get_api_keys <- function(id, key_id) { args <- as.list(match.call())[-1] - path <- "/users/{id}/api_keys" - path_params <- list(id = id) + path <- "/users/{id}/api_keys/{key_id}" + path_params <- list(id = id, key_id = key_id) query_params <- list() - body_params <- list(expiresIn = expires_in, name = name, constraints = constraints) + body_params <- list() path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("POST", path, path_params, query_params, body_params) + resp <- call_api("GET", path, path_params, query_params, body_params) return(resp) } -#' Show the specified API key +#' Revoke the specified API key #' @param id string required. The ID of the user or 'me'. #' @param key_id integer required. The ID of the API key. #' @@ -31537,7 +36218,7 @@ users_post_api_keys <- function(id, expires_in, name, constraints = NULL) { #' \item deleteAllowed boolean, Whether the constraint allows DELETE requests. #' }} #' @export -users_get_api_keys <- function(id, key_id) { +users_delete_api_keys <- function(id, key_id) { args <- as.list(match.call())[-1] path <- "/users/{id}/api_keys/{key_id}" @@ -31547,45 +36228,65 @@ users_get_api_keys <- function(id, key_id) { path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] - resp <- call_api("GET", path, path_params, query_params, body_params) + resp <- call_api("DELETE", path, path_params, query_params, body_params) return(resp) } -#' Revoke the specified API key -#' @param id string required. The ID of the user or 'me'. -#' @param key_id integer required. The ID of the API key. +#' Terminate all of the user's active sessions (must be a team or org admin) +#' @param id integer required. The ID of this user. 
#' #' @return A list containing the following elements: -#' \item{id}{integer, The ID of the API key.} -#' \item{name}{string, The name of the API key.} -#' \item{expiresAt}{string, The date and time when the key expired.} -#' \item{createdAt}{string, The date and time when the key was created.} -#' \item{revokedAt}{string, The date and time when the key was revoked.} -#' \item{lastUsedAt}{string, The date and time when the key was last used.} -#' \item{scopes}{array, The scopes which the key is permissioned on.} -#' \item{useCount}{integer, The number of times the key has been used.} -#' \item{expired}{boolean, True if the key has expired.} -#' \item{active}{boolean, True if the key has neither expired nor been revoked.} -#' \item{constraints}{array, An array containing the following fields: +#' \item{id}{integer, The ID of this user.} +#' \item{user}{string, The username of this user.} +#' \item{name}{string, The name of this user.} +#' \item{email}{string, The email of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} +#' \item{primaryGroupId}{integer, The ID of the primary group of this user.} +#' \item{groups}{array, An array containing the following fields: #' \itemize{ -#' \item constraint string, The path matcher of the constraint. -#' \item constraintType string, The type of constraint (exact/prefix/regex/verb). -#' \item getAllowed boolean, Whether the constraint allows GET requests. -#' \item headAllowed boolean, Whether the constraint allows HEAD requests. -#' \item postAllowed boolean, Whether the constraint allows POST requests. -#' \item putAllowed boolean, Whether the constraint allows PUT requests. -#' \item patchAllowed boolean, Whether the constraint allows PATCH requests. -#' \item deleteAllowed boolean, Whether the constraint allows DELETE requests. +#' \item id integer, The ID of this group. +#' \item name string, The name of this group. +#' \item slug string, The slug of this group. +#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. #' }} +#' \item{city}{string, The city of this user.} +#' \item{state}{string, The state of this user.} +#' \item{timeZone}{string, The time zone of this user.} +#' \item{initials}{string, The initials of this user.} +#' \item{department}{string, The department of this user.} +#' \item{title}{string, The title of this user.} +#' \item{githubUsername}{string, The GitHub username of this user.} +#' \item{prefersSmsOtp}{boolean, The preference for phone authorization of this user} +#' \item{vpnEnabled}{boolean, The availability of vpn for this user.} +#' \item{ssoDisabled}{boolean, The availability of SSO for this user.} +#' \item{otpRequiredForLogin}{boolean, The two factor authentication requirement for this user.} +#' \item{exemptFromOrgSmsOtpDisabled}{boolean, Whether the user has SMS OTP enabled on an individual level. 
This field does not matter if the org does not have SMS OTP disabled.} +#' \item{smsOtpAllowed}{boolean, Whether the user is allowed to receive two factor authentication codes via SMS.} +#' \item{robot}{boolean, Whether the user is a robot.} +#' \item{phone}{string, The phone number of this user.} +#' \item{organizationSlug}{string, The slug of the organization the user belongs to.} +#' \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} +#' \item{organizationLoginType}{string, The user's organization's login type.} +#' \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' \item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +#' \item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} #' @export -users_delete_api_keys <- function(id, key_id) { +users_delete_sessions <- function(id) { args <- as.list(match.call())[-1] - path <- "/users/{id}/api_keys/{key_id}" - path_params <- list(id = id, key_id = key_id) + path <- "/users/{id}/sessions" + path_params <- list(id = id) query_params <- list() body_params <- list() path_params <- path_params[match_params(path_params, args)] @@ -31680,10 +36381,99 @@ users_delete_me_favorites <- function(id) { } +#' Unsuspends user +#' @param id integer required. The ID of this user. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this user.} +#' \item{user}{string, The username of this user.} +#' \item{unlockedAt}{string, The time the user's account was unsuspended} +#' @export +users_post_unsuspend <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/users/{id}/unsuspend" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Wipes the user's current 2FA settings so that they must reset them upon next login +#' @param id integer required. The ID of this user. +#' +#' @return A list containing the following elements: +#' \item{id}{integer, The ID of this user.} +#' \item{user}{string, The username of this user.} +#' \item{name}{string, The name of this user.} +#' \item{email}{string, The email of this user.} +#' \item{active}{boolean, Whether this user account is active or deactivated.} +#' \item{primaryGroupId}{integer, The ID of the primary group of this user.} +#' \item{groups}{array, An array containing the following fields: +#' \itemize{ +#' \item id integer, The ID of this group. +#' \item name string, The name of this group. +#' \item slug string, The slug of this group. 
+#' \item organizationId integer, The ID of the organization associated with this group. +#' \item organizationName string, The name of the organization associated with this group. +#' }} +#' \item{city}{string, The city of this user.} +#' \item{state}{string, The state of this user.} +#' \item{timeZone}{string, The time zone of this user.} +#' \item{initials}{string, The initials of this user.} +#' \item{department}{string, The department of this user.} +#' \item{title}{string, The title of this user.} +#' \item{githubUsername}{string, The GitHub username of this user.} +#' \item{prefersSmsOtp}{boolean, The preference for phone authorization of this user} +#' \item{vpnEnabled}{boolean, The availability of vpn for this user.} +#' \item{ssoDisabled}{boolean, The availability of SSO for this user.} +#' \item{otpRequiredForLogin}{boolean, The two factor authentication requirement for this user.} +#' \item{exemptFromOrgSmsOtpDisabled}{boolean, Whether the user has SMS OTP enabled on an individual level. This field does not matter if the org does not have SMS OTP disabled.} +#' \item{smsOtpAllowed}{boolean, Whether the user is allowed to receive two factor authentication codes via SMS.} +#' \item{robot}{boolean, Whether the user is a robot.} +#' \item{phone}{string, The phone number of this user.} +#' \item{organizationSlug}{string, The slug of the organization the user belongs to.} +#' \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} +#' \item{organizationLoginType}{string, The user's organization's login type.} +#' \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +#' \item{createdAt}{string, The date and time when the user was created.} +#' \item{updatedAt}{string, The date and time when the user was last updated.} +#' \item{lastSeenAt}{string, The date and time when the user last visited Platform.} +#' \item{suspended}{boolean, Whether the user is suspended due to inactivity.} +#' \item{createdById}{integer, The ID of the user who created this user.} +#' \item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +#' \item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +#' \item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} +#' @export +users_delete_2fa <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/users/{id}/2fa" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("DELETE", path, path_params, query_params, body_params) + + return(resp) + + } + + #' List Workflows #' @param hidden boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items. #' @param archived string optional. The archival status of the requested item(s). -#' @param author string optional. Author of the workflow. It accepts a comma-separated list of author ids. +#' @param author string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs. #' @param state array optional. 
State of the most recent execution.One or more of queued, running, succeeded, failed, cancelled, idle, and scheduled. #' @param scheduled boolean optional. If the workflow is scheduled. #' @param limit integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50. @@ -31709,10 +36499,11 @@ users_delete_me_favorites <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -31746,10 +36537,11 @@ workflows_list <- function(hidden = NULL, archived = NULL, author = NULL, state #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param allow_concurrent_executions boolean optional. Whether the workflow can execute when already running. #' @param time_zone string optional. The time zone of this workflow. @@ -31786,10 +36578,11 @@ workflows_list <- function(hidden = NULL, archived = NULL, author = NULL, state #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
#' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -31807,6 +36600,7 @@ workflows_list <- function(hidden = NULL, archived = NULL, author = NULL, state #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -31850,10 +36644,11 @@ workflows_post <- function(name, description = NULL, from_job_chain = NULL, defi #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -31871,6 +36666,7 @@ workflows_post <- function(name, description = NULL, from_job_chain = NULL, defi #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -31899,10 +36695,11 @@ workflows_get <- function(id) { #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param allow_concurrent_executions boolean optional. Whether the workflow can execute when already running. #' @param time_zone string optional. The time zone of this workflow. @@ -31938,10 +36735,11 @@ workflows_get <- function(id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -31959,6 +36757,7 @@ workflows_get <- function(id) { #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -31987,10 +36786,11 @@ workflows_put <- function(id, name, description = NULL, definition = NULL, sched #' @param schedule list optional. A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' } #' @param allow_concurrent_executions boolean optional. Whether the workflow can execute when already running. #' @param time_zone string optional. The time zone of this workflow. @@ -32026,10 +36826,11 @@ workflows_put <- function(id, name, description = NULL, definition = NULL, sched #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -32047,6 +36848,7 @@ workflows_put <- function(id, name, description = NULL, definition = NULL, sched #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -32238,6 +37040,70 @@ workflows_delete_shares_groups <- function(id, group_id) { } +#' List dependent objects for this object +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer optional. ID of target user +#' +#' @return An array containing the following fields: +#' \item{objectType}{string, Dependent object type} +#' \item{fcoType}{string, Human readable dependent object type} +#' \item{id}{integer, Dependent object ID} +#' \item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +#' \item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +#' \item{shareable}{boolean, Whether or not the requesting user can share this object.} +#' @export +workflows_list_dependencies <- function(id, user_id = NULL) { + + args <- as.list(match.call())[-1] + path <- "/workflows/{id}/dependencies" + path_params <- list(id = id) + query_params <- list(user_id = user_id) + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("GET", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Transfer ownership of this object to another user +#' @param id integer required. The ID of the resource that is shared. +#' @param user_id integer required. ID of target user +#' @param include_dependencies boolean required. Whether or not to give manage permissions on all dependencies +#' @param email_body string optional. Custom body text for e-mail sent on transfer. +#' @param send_email boolean optional. Send email to the target user of the transfer? +#' +#' @return A list containing the following elements: +#' \item{dependencies}{array, An array containing the following fields: +#' \itemize{ +#' \item objectType string, Dependent object type +#' \item fcoType string, Human readable dependent object type +#' \item id integer, Dependent object ID +#' \item name string, Dependent object name, or nil if the requesting user cannot read this object +#' \item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +#' \item shared boolean, Whether dependent object was successfully shared with target user +#' }} +#' @export +workflows_put_transfer <- function(id, user_id, include_dependencies, email_body = NULL, send_email = NULL) { + + args <- as.list(match.call())[-1] + path <- "/workflows/{id}/transfer" + path_params <- list(id = id) + query_params <- list() + body_params <- list(userId = user_id, includeDependencies = include_dependencies, emailBody = email_body, sendEmail = send_email) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PUT", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Update the archive status of this object #' @param id integer required. The ID of the object. #' @param status boolean required. The desired archived status of the object. 
@@ -32262,10 +37128,11 @@ workflows_delete_shares_groups <- function(id, group_id) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -32283,6 +37150,7 @@ workflows_delete_shares_groups <- function(id, group_id) { #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -32399,7 +37267,7 @@ workflows_delete_projects <- function(id, project_id) { #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -32409,7 +37277,8 @@ workflows_delete_projects <- function(id, project_id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export workflows_list_git <- function(id) { @@ -32434,10 +37303,11 @@ workflows_list_git <- function(id) { #' @param git_branch string optional. The git branch that the file is on. #' @param git_path string optional. The path of the file in the repository. #' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. #' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. #' #' @return A list containing the following elements: -#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, tag or the full or shortened SHA of a commit.} #' \item{gitBranch}{string, The git branch that the file is on.} #' \item{gitPath}{string, The path of the file in the repository.} #' \item{gitRepo}{list, A list containing the following elements: @@ -32447,15 +37317,16 @@ workflows_list_git <- function(id) { #' \item createdAt string, #' \item updatedAt string, #' }} -#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} #' @export -workflows_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, pull_from_git = NULL) { +workflows_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { args <- as.list(match.call())[-1] path <- "/workflows/{id}/git" path_params <- list(id = id) query_params <- list() - body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, pullFromGit = pull_from_git) + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) path_params <- path_params[match_params(path_params, args)] query_params <- query_params[match_params(query_params, args)] body_params <- body_params[match_params(body_params, args)] @@ -32466,7 +37337,47 @@ workflows_put_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = } -#' Get the git commits for an item +#' Update an attached git file +#' @param id integer required. The ID of the file. +#' @param git_ref string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit. +#' @param git_branch string optional. The git branch that the file is on. +#' @param git_path string optional. The path of the file in the repository. +#' @param git_repo_url string optional. The URL of the git repository. +#' @param git_ref_type string optional. Specifies if the file is versioned by branch or tag. +#' @param pull_from_git boolean optional. Automatically pull latest commit from git. Only works for scripts. +#' +#' @return A list containing the following elements: +#' \item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +#' \item{gitBranch}{string, The git branch that the file is on.} +#' \item{gitPath}{string, The path of the file in the repository.} +#' \item{gitRepo}{list, A list containing the following elements: +#' \itemize{ +#' \item id integer, The ID for this git repository. +#' \item repoUrl string, The URL for this git repository. +#' \item createdAt string, +#' \item updatedAt string, +#' }} +#' \item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +#' \item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} +#' @export +workflows_patch_git <- function(id, git_ref = NULL, git_branch = NULL, git_path = NULL, git_repo_url = NULL, git_ref_type = NULL, pull_from_git = NULL) { + + args <- as.list(match.call())[-1] + path <- "/workflows/{id}/git" + path_params <- list(id = id) + query_params <- list() + body_params <- list(gitRef = git_ref, gitBranch = git_branch, gitPath = git_path, gitRepoUrl = git_repo_url, gitRefType = git_ref_type, pullFromGit = pull_from_git) + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("PATCH", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Get the git commits for an item on the current branch #' @param id integer required. The ID of the file. #' #' @return A list containing the following elements: @@ -32521,7 +37432,7 @@ workflows_post_git_commits <- function(id, content, message, file_hash) { } -#' Get file contents at commit_hash +#' Get file contents at git ref #' @param id integer required. The ID of the file. #' @param commit_hash string required. The SHA (full or shortened) of the desired git commit. #' @@ -32548,6 +37459,58 @@ workflows_get_git_commits <- function(id, commit_hash) { } +#' Checkout latest commit on the current branch of a script or workflow +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +workflows_post_git_checkout_latest <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/workflows/{id}/git/checkout-latest" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + +#' Checkout content that the existing git_ref points to and save to the object +#' @param id integer required. The ID of the file. +#' +#' @return A list containing the following elements: +#' \item{content}{string, The file's contents.} +#' \item{type}{string, The file's type.} +#' \item{size}{integer, The file's size.} +#' \item{fileHash}{string, The SHA of the file.} +#' @export +workflows_post_git_checkout <- function(id) { + + args <- as.list(match.call())[-1] + path <- "/workflows/{id}/git/checkout" + path_params <- list(id = id) + query_params <- list() + body_params <- list() + path_params <- path_params[match_params(path_params, args)] + query_params <- query_params[match_params(query_params, args)] + body_params <- body_params[match_params(body_params, args)] + resp <- call_api("POST", path, path_params, query_params, body_params) + + return(resp) + + } + + #' Clone this Workflow #' @param id integer required. The ID for the workflow. #' @param clone_schedule boolean optional. If true, also copy the schedule to the new workflow. 
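A sketch of how the new git fields and checkout endpoints introduced above might fit together: `workflows_patch_git()` with the new `git_ref_type` body parameter, followed by `workflows_post_git_checkout()` or `workflows_post_git_checkout_latest()`. The workflow ID, tag name, and the literal `"tag"` value for `git_ref_type` are assumptions for illustration; the patch only states that the field distinguishes branch from tag versioning.

# Pin the workflow's attached git file to a specific ref (here assumed to be a tag),
# then save the content that git_ref points to back onto the workflow object.
workflows_patch_git(
  id = 101,                 # placeholder workflow ID
  git_ref = "v1.2.0",       # placeholder tag name
  git_ref_type = "tag"      # assumed value; field versions the file by branch or tag
)
workflows_post_git_checkout(id = 101)

# When tracking a branch instead, pull the latest commit on the current branch.
workflows_post_git_checkout_latest(id = 101)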
@@ -32573,10 +37536,11 @@ workflows_get_git_commits <- function(id, commit_hash) { #' \item{schedule}{list, A list containing the following elements: #' \itemize{ #' \item scheduled boolean, If the item is scheduled. -#' \item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +#' \item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth #' \item scheduledHours array, Hours of the day it is scheduled on. #' \item scheduledMinutes array, Minutes of the day it is scheduled on. -#' \item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +#' \item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +#' \item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. #' }} #' \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} #' \item{timeZone}{string, The time zone of this workflow.} @@ -32594,6 +37558,7 @@ workflows_get_git_commits <- function(id, commit_hash) { #' }} #' \item{archived}{string, The archival status of the requested item(s).} #' \item{hidden}{boolean, The hidden status of the item.} +#' \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} #' \item{createdAt}{string, } #' \item{updatedAt}{string, } #' @export @@ -32660,7 +37625,7 @@ workflows_list_executions <- function(id, limit = NULL, page_num = NULL, order = #' @param id integer required. The ID for the workflow. #' @param target_task string optional. For a reverse workflow, the name of the task to target. #' @param input list optional. Key-value pairs to send to this execution as inputs. -#' @param included_tasks array optional. If specified, executes only the subset of workflow tasks included. +#' @param included_tasks array optional. If specified, executes only the subset of workflow tasks included as specified by task name. #' #' @return A list containing the following elements: #' \item{id}{integer, The ID for this workflow execution.} @@ -32922,6 +37887,7 @@ workflows_post_executions_retry <- function(id, execution_id, task_name = NULL) #' \itemize{ #' \item id integer, The ID of the run. #' \item jobId integer, The ID of the job associated with the run. +#' \item myPermissionLevel string, Your permission level on the job. One of "read", "write", "manage", or "nil". #' \item state string, The state of the run. #' \item createdAt string, The time that the run was queued. #' \item startedAt string, The time that the run started. @@ -32931,6 +37897,7 @@ workflows_post_executions_retry <- function(id, execution_id, task_name = NULL) #' \itemize{ #' \item id integer, The ID of the execution. #' \item workflowId integer, The ID of the workflow associated with the execution. +#' \item myPermissionLevel string, Your permission level on the workflow. One of "read", "write", "manage", or "nil". #' \item state string, The state of this workflow execution. #' \item createdAt string, The time this execution was created. #' \item startedAt string, The time this execution started. 
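The execution endpoints touched above (the clarified `included_tasks` parameter and `workflows_post_executions_retry()`) could be exercised as in the sketch below. The wrapper name `workflows_post_executions()` for creating an execution is an assumption based on the generated client's naming convention and is not shown in this hunk; the workflow ID and task names are placeholders.

# Run only a named subset of a workflow's tasks, then retry a single task
# on that execution if it fails. All IDs and task names are illustrative.
exec <- workflows_post_executions(
  id = 101,
  included_tasks = c("load_data", "train_model")  # executes only these tasks, by task name
)

workflows_post_executions_retry(
  id = 101,
  execution_id = exec$id,
  task_name = "train_model"
)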
diff --git a/README.md b/README.md index f8a6a55e..49a4c751 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ print(team_members) ## API Documentation -[https://platform.civisanalytics.com/api](https://platform.civisanalytics.com/api) +[https://platform.civisanalytics.com/#/api](https://platform.civisanalytics.com/#/api) ## Contributing @@ -172,4 +172,4 @@ person("FirstName", "LastName", email = "email@email.com", role = "ctb") ``` This project is intended to be a safe, welcoming space for collaboration, and -contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct. +contributors are expected to adhere to the [Contributor Covenant](http://www.contributor-covenant.org) code of conduct. diff --git a/man/admin_list_organizations.Rd b/man/admin_list_organizations.Rd index 5f0a14b9..a4ce1bfb 100644 --- a/man/admin_list_organizations.Rd +++ b/man/admin_list_organizations.Rd @@ -28,6 +28,10 @@ An array containing the following fields: \item{csMetadata}{string, Additional metadata about the organization in JSON format.} \item{removeFooterInEmails}{boolean, If true, emails sent by platform will not include Civis text.} \item{salesforceAccountId}{string, The SalesForce Account ID for this organization.} +\item{tableauSiteId}{string, The Tableau Site ID for this organization.} +\item{fedrampEnabled}{boolean, Flag denoting whether this organization is FedRAMP compliant.} +\item{createdById}{integer, The ID of the user who created this organization} +\item{lastUpdatedById}{integer, The ID of the user who last updated this organization} \item{advancedSettings}{list, A list containing the following elements: \itemize{ \item dedicatedDjPoolEnabled boolean, If true, the Organization has a dedicated delayed jobs pool. Defaults to false. diff --git a/man/aliases_get.Rd b/man/aliases_get.Rd index a3d3ad89..1644033a 100644 --- a/man/aliases_get.Rd +++ b/man/aliases_get.Rd @@ -13,7 +13,7 @@ aliases_get(id) A list containing the following elements: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_get_object_type.Rd b/man/aliases_get_object_type.Rd index 3c5de381..bc94a72d 100644 --- a/man/aliases_get_object_type.Rd +++ b/man/aliases_get_object_type.Rd @@ -7,7 +7,7 @@ aliases_get_object_type(object_type, alias) } \arguments{ -\item{object_type}{string required. The type of the object. 
Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{object_type}{string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string required. The alias of the object} } @@ -15,7 +15,7 @@ aliases_get_object_type(object_type, alias) A list containing the following elements: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_list.Rd b/man/aliases_list.Rd index d38973aa..6f5ed86f 100644 --- a/man/aliases_list.Rd +++ b/man/aliases_list.Rd @@ -13,7 +13,7 @@ aliases_list( ) } \arguments{ -\item{object_type}{string optional. Filter results by object type. Pass multiple object types with a comma-separatedlist. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{object_type}{string optional. Filter results by object type. Pass multiple object types with a comma-separatedlist. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} @@ -27,7 +27,7 @@ aliases_list( An array containing the following fields: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. 
Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_list_dependencies.Rd b/man/aliases_list_dependencies.Rd new file mode 100644 index 00000000..d0b9d1ad --- /dev/null +++ b/man/aliases_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{aliases_list_dependencies} +\alias{aliases_list_dependencies} +\title{List dependent objects for this object} +\usage{ +aliases_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/aliases_patch.Rd b/man/aliases_patch.Rd index b20df3b9..fcf7ee51 100644 --- a/man/aliases_patch.Rd +++ b/man/aliases_patch.Rd @@ -17,7 +17,7 @@ aliases_patch( \item{object_id}{integer optional. The id of the object} -\item{object_type}{string optional. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{object_type}{string optional. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string optional. The alias of the object} @@ -27,7 +27,7 @@ aliases_patch( A list containing the following elements: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. 
Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_post.Rd b/man/aliases_post.Rd index 278ef7f4..1f06a9a2 100644 --- a/man/aliases_post.Rd +++ b/man/aliases_post.Rd @@ -9,7 +9,7 @@ aliases_post(object_id, object_type, alias, display_name = NULL) \arguments{ \item{object_id}{integer required. The id of the object} -\item{object_type}{string required. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{object_type}{string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string required. The alias of the object} @@ -19,7 +19,7 @@ aliases_post(object_id, object_type, alias, display_name = NULL) A list containing the following elements: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_put.Rd b/man/aliases_put.Rd index 8ea72c5b..0f55acb3 100644 --- a/man/aliases_put.Rd +++ b/man/aliases_put.Rd @@ -11,7 +11,7 @@ aliases_put(id, object_id, object_type, alias, display_name = NULL) \item{object_id}{integer required. The id of the object} -\item{object_type}{string required. The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{object_type}{string required. The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string required. 
The alias of the object} @@ -21,7 +21,7 @@ aliases_put(id, object_id, object_type, alias, display_name = NULL) A list containing the following elements: \item{id}{integer, The id of the Alias object.} \item{objectId}{integer, The id of the object} -\item{objectType}{string, The type of the object. Valid types include: model, cass_ncoa, container_script, gdoc_export, geocode, media_optimizer, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} +\item{objectType}{string, The type of the object. Valid types include: cass_ncoa, container_script, geocode, python_script, r_script, salesforce_export, javascript_script, sql_script, project, notebook, workflow, template_script, template_report, service, report, tableau and service_report.} \item{alias}{string, The alias of the object} \item{userId}{integer, The id of the user who created the alias} \item{displayName}{string, The display name of the Alias object. Defaults to object name if not provided.} diff --git a/man/aliases_put_transfer.Rd b/man/aliases_put_transfer.Rd new file mode 100644 index 00000000..cf0e9a69 --- /dev/null +++ b/man/aliases_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{aliases_put_transfer} +\alias{aliases_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +aliases_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/apps_delete_instances_projects.Rd b/man/apps_delete_instances_projects.Rd deleted file mode 100644 index 12abbcb6..00000000 --- a/man/apps_delete_instances_projects.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_delete_instances_projects} -\alias{apps_delete_instances_projects} -\title{Remove an App Instance from a project} -\usage{ -apps_delete_instances_projects(id, project_id, slug) -} -\arguments{ -\item{id}{integer required. The ID of the App Instance.} - -\item{project_id}{integer required. The ID of the project.} - -\item{slug}{string required. 
The slug for the application.} -} -\value{ -An empty HTTP response -} -\description{ -Remove an App Instance from a project -} diff --git a/man/apps_get.Rd b/man/apps_get.Rd deleted file mode 100644 index bc800592..00000000 --- a/man/apps_get.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_get} -\alias{apps_get} -\title{List details of a Decision Application} -\usage{ -apps_get(slug) -} -\arguments{ -\item{slug}{string required. The slug for the application.} -} -\value{ -A list containing the following elements: -\item{slug}{string, The slug for the application.} -\item{id}{integer, The unique id of the application.} -\item{instanceName}{string, A word that describes an instance of this app.} -\item{name}{string, The name of the application.} -\item{currentRelease}{list, A list containing the following elements: -\itemize{ -\item id integer, The unique id of the release. -\item appId integer, The id of the app the release belongs to. -\item reportTemplateId integer, ID of the report template for this release. -\item resources object, A hash of resources associated with this release. -\item archived string, The archival status of the requested item(s). -}} -\item{features}{list, App features.} -} -\description{ -List details of a Decision Application -} diff --git a/man/apps_get_instances.Rd b/man/apps_get_instances.Rd deleted file mode 100644 index d8903569..00000000 --- a/man/apps_get_instances.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_get_instances} -\alias{apps_get_instances} -\title{Return a given app instance} -\usage{ -apps_get_instances(id, slug) -} -\arguments{ -\item{id}{integer required. The unique id of the instance.} - -\item{slug}{string required. The slug for the application.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the instance.} -\item{name}{string, The name of the instance.} -\item{appReleaseId}{integer, The id of the app release the instance belongs to.} -\item{reportId}{integer, The id of the report the instance belongs to.} -\item{createdAt}{string, The time the instance was created at.} -\item{user}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. -}} -\item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -\item{authCodeUrl}{string, } -\item{apiKey}{string, A Civis API key that can be used by this app instance.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Return a given app instance -} diff --git a/man/apps_get_releases.Rd b/man/apps_get_releases.Rd deleted file mode 100644 index 211d6bed..00000000 --- a/man/apps_get_releases.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_get_releases} -\alias{apps_get_releases} -\title{Return a given app release} -\usage{ -apps_get_releases(id, slug) -} -\arguments{ -\item{id}{integer required. The unique id of the release.} - -\item{slug}{string required. 
The slug for the application.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the release.} -\item{appId}{integer, The id of the app the release belongs to.} -\item{reportTemplateId}{integer, ID of the report template for this release.} -\item{resources}{list, A hash of resources associated with this release.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Return a given app release -} diff --git a/man/apps_list.Rd b/man/apps_list.Rd deleted file mode 100644 index 3c02db8f..00000000 --- a/man/apps_list.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_list} -\alias{apps_list} -\title{List apps} -\usage{ -apps_list() -} -\value{ -An array containing the following fields: -\item{slug}{string, The slug for the application.} -\item{id}{integer, The unique id of the application.} -\item{instanceName}{string, A word that describes an instance of this app.} -\item{name}{string, The name of the application.} -} -\description{ -List apps -} diff --git a/man/apps_list_instances.Rd b/man/apps_list_instances.Rd deleted file mode 100644 index 70531bc6..00000000 --- a/man/apps_list_instances.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_list_instances} -\alias{apps_list_instances} -\title{List the instances of a Decision Application} -\usage{ -apps_list_instances( - slug, - archived = NULL, - app_release_id = NULL, - limit = NULL, - page_num = NULL, - order = NULL, - order_dir = NULL -) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{archived}{string optional. The archival status of the requested item(s).} - -\item{app_release_id}{integer optional. If supplied, return only instances matching this release.} - -\item{limit}{integer optional. Number of results to return. Defaults to its maximum of 50.} - -\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} - -\item{order}{string optional. The field on which to order the result set. Defaults to id. Must be one of: id, created_at.} - -\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} -} -\value{ -An array containing the following fields: -\item{id}{integer, The unique id of the instance.} -\item{name}{string, The name of the instance.} -\item{appReleaseId}{integer, The id of the app release the instance belongs to.} -\item{reportId}{integer, The id of the report the instance belongs to.} -\item{createdAt}{string, The time the instance was created at.} -\item{user}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. 
-}} -\item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -List the instances of a Decision Application -} diff --git a/man/apps_list_instances_projects.Rd b/man/apps_list_instances_projects.Rd deleted file mode 100644 index 444b151d..00000000 --- a/man/apps_list_instances_projects.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_list_instances_projects} -\alias{apps_list_instances_projects} -\title{List the projects an App Instance belongs to} -\usage{ -apps_list_instances_projects(id, slug, hidden = NULL) -} -\arguments{ -\item{id}{integer required. The ID of the App Instance.} - -\item{slug}{string required. The slug for the application.} - -\item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} -} -\value{ -An array containing the following fields: -\item{id}{integer, The ID for this project.} -\item{author}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. -}} -\item{name}{string, The name of this project.} -\item{description}{string, A description of the project.} -\item{users}{array, An array containing the following fields: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. -}} -\item{autoShare}{boolean, } -\item{createdAt}{string, } -\item{updatedAt}{string, } -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -List the projects an App Instance belongs to -} diff --git a/man/apps_list_releases.Rd b/man/apps_list_releases.Rd deleted file mode 100644 index 6aac6690..00000000 --- a/man/apps_list_releases.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_list_releases} -\alias{apps_list_releases} -\title{List the releases of a particular Decision Application} -\usage{ -apps_list_releases( - slug, - archived = NULL, - limit = NULL, - page_num = NULL, - order = NULL, - order_dir = NULL -) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{archived}{string optional. The archival status of the requested item(s).} - -\item{limit}{integer optional. Number of results to return. Defaults to its maximum of 50.} - -\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} - -\item{order}{string optional. The field on which to order the result set. Defaults to id. Must be one of: id.} - -\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} -} -\value{ -An array containing the following fields: -\item{id}{integer, The unique id of the release.} -\item{appId}{integer, The id of the app the release belongs to.} -\item{reportTemplateId}{integer, ID of the report template for this release.} -\item{resources}{list, A hash of resources associated with this release.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -List the releases of a particular Decision Application -} diff --git a/man/apps_patch_instances.Rd b/man/apps_patch_instances.Rd deleted file mode 100644 index f7284943..00000000 --- a/man/apps_patch_instances.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_patch_instances} -\alias{apps_patch_instances} -\title{Update a given app instance} -\usage{ -apps_patch_instances(id, slug, name = NULL) -} -\arguments{ -\item{id}{integer required. The unique id of the instance.} - -\item{slug}{string required. The slug for the application.} - -\item{name}{string optional. The name of the instance.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the instance.} -\item{name}{string, The name of the instance.} -\item{appReleaseId}{integer, The id of the app release the instance belongs to.} -\item{reportId}{integer, The id of the report the instance belongs to.} -\item{createdAt}{string, The time the instance was created at.} -\item{user}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. -}} -\item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -\item{authCodeUrl}{string, } -\item{apiKey}{string, A Civis API key that can be used by this app instance.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Update a given app instance -} diff --git a/man/apps_patch_releases.Rd b/man/apps_patch_releases.Rd deleted file mode 100644 index 747fa958..00000000 --- a/man/apps_patch_releases.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_patch_releases} -\alias{apps_patch_releases} -\title{Update an existing Decision Application release} -\usage{ -apps_patch_releases(slug, id, report_template_id = NULL, resources = NULL) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{id}{integer required. The unique id of the release.} - -\item{report_template_id}{integer optional. ID of the report template for this release.} - -\item{resources}{list optional. 
A hash of resources associated with this release.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the release.} -\item{appId}{integer, The id of the app the release belongs to.} -\item{reportTemplateId}{integer, ID of the report template for this release.} -\item{resources}{list, A hash of resources associated with this release.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Update an existing Decision Application release -} diff --git a/man/apps_post_instances.Rd b/man/apps_post_instances.Rd deleted file mode 100644 index c6cd9937..00000000 --- a/man/apps_post_instances.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_post_instances} -\alias{apps_post_instances} -\title{Create a new instance of an application of the given slug} -\usage{ -apps_post_instances(slug, name = NULL) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{name}{string optional. The name of the instance.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the instance.} -\item{name}{string, The name of the instance.} -\item{appReleaseId}{integer, The id of the app release the instance belongs to.} -\item{reportId}{integer, The id of the report the instance belongs to.} -\item{createdAt}{string, The time the instance was created at.} -\item{user}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. -}} -\item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -\item{authCodeUrl}{string, } -\item{apiKey}{string, A Civis API key that can be used by this app instance.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Create a new instance of an application of the given slug -} diff --git a/man/apps_post_releases.Rd b/man/apps_post_releases.Rd deleted file mode 100644 index 3182f5e1..00000000 --- a/man/apps_post_releases.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_post_releases} -\alias{apps_post_releases} -\title{Create a new Decision Application release} -\usage{ -apps_post_releases(slug, report_template_id, resources) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{report_template_id}{integer required. ID of the report template for this release.} - -\item{resources}{list required. 
A hash of resources associated with this release.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the release.} -\item{appId}{integer, The id of the app the release belongs to.} -\item{reportTemplateId}{integer, ID of the report template for this release.} -\item{resources}{list, A hash of resources associated with this release.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Create a new Decision Application release -} diff --git a/man/apps_put_features.Rd b/man/apps_put_features.Rd deleted file mode 100644 index 659b464a..00000000 --- a/man/apps_put_features.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_put_features} -\alias{apps_put_features} -\title{Update the Decision Application features for a given organization} -\usage{ -apps_put_features(slug, org, features) -} -\arguments{ -\item{slug}{string required. The slug for the application.} - -\item{org}{string required. Organization.} - -\item{features}{list required. App features.} -} -\value{ -A list containing the following elements: -\item{slug}{string, The slug for the application.} -\item{id}{integer, The unique id of the application.} -\item{instanceName}{string, A word that describes an instance of this app.} -\item{name}{string, The name of the application.} -\item{currentRelease}{list, A list containing the following elements: -\itemize{ -\item id integer, The unique id of the release. -\item appId integer, The id of the app the release belongs to. -\item reportTemplateId integer, ID of the report template for this release. -\item resources object, A hash of resources associated with this release. -\item archived string, The archival status of the requested item(s). -}} -\item{features}{list, App features.} -} -\description{ -Update the Decision Application features for a given organization -} diff --git a/man/apps_put_instances_archive.Rd b/man/apps_put_instances_archive.Rd deleted file mode 100644 index e0478ddf..00000000 --- a/man/apps_put_instances_archive.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_put_instances_archive} -\alias{apps_put_instances_archive} -\title{Update the archive status of this object} -\usage{ -apps_put_instances_archive(id, slug, status) -} -\arguments{ -\item{id}{integer required. The ID of the object.} - -\item{slug}{string required. The slug for the application.} - -\item{status}{boolean required. The desired archived status of the object.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the instance.} -\item{name}{string, The name of the instance.} -\item{appReleaseId}{integer, The id of the app release the instance belongs to.} -\item{reportId}{integer, The id of the report the instance belongs to.} -\item{createdAt}{string, The time the instance was created at.} -\item{user}{list, A list containing the following elements: -\itemize{ -\item id integer, The ID of this user. -\item name string, This user's name. -\item username string, This user's username. -\item initials string, This user's initials. -\item online boolean, Whether this user is online. 
-}} -\item{projectId}{integer, The id of the project collecting all the items that belong to this app instance.} -\item{authCodeUrl}{string, } -\item{apiKey}{string, A Civis API key that can be used by this app instance.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Update the archive status of this object -} diff --git a/man/apps_put_instances_projects.Rd b/man/apps_put_instances_projects.Rd deleted file mode 100644 index 14998206..00000000 --- a/man/apps_put_instances_projects.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_put_instances_projects} -\alias{apps_put_instances_projects} -\title{Add an App Instance to a project} -\usage{ -apps_put_instances_projects(id, project_id, slug) -} -\arguments{ -\item{id}{integer required. The ID of the App Instance.} - -\item{project_id}{integer required. The ID of the project.} - -\item{slug}{string required. The slug for the application.} -} -\value{ -An empty HTTP response -} -\description{ -Add an App Instance to a project -} diff --git a/man/apps_put_releases_archive.Rd b/man/apps_put_releases_archive.Rd deleted file mode 100644 index 0a96a6c9..00000000 --- a/man/apps_put_releases_archive.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{apps_put_releases_archive} -\alias{apps_put_releases_archive} -\title{Update the archive status of this object} -\usage{ -apps_put_releases_archive(id, slug, status) -} -\arguments{ -\item{id}{integer required. The ID of the object.} - -\item{slug}{string required. The slug for the application.} - -\item{status}{boolean required. The desired archived status of the object.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The unique id of the release.} -\item{appId}{integer, The id of the app the release belongs to.} -\item{reportTemplateId}{integer, ID of the report template for this release.} -\item{resources}{list, A hash of resources associated with this release.} -\item{archived}{string, The archival status of the requested item(s).} -} -\description{ -Update the archive status of this object -} diff --git a/man/civis_ml.Rd b/man/civis_ml.Rd index cead903b..73993b0d 100644 --- a/man/civis_ml.Rd +++ b/man/civis_ml.Rd @@ -200,24 +200,24 @@ start by imputing missing values with the mean of non-null values in a column. The \code{"sparse_*"} models include a LASSO regression step (using \code{glmnet}) to do feature selection before passing data to the final model. In some models, CivisML uses default parameters from those in -\href{http://scikit-learn.org/stable/}{Scikit-Learn}, as indicated in the "Altered Defaults" column. +\href{https://scikit-learn.org/stable/}{Scikit-Learn}, as indicated in the "Altered Defaults" column. All models also have \code{random_state=42}. Specific workflows can also be called directly using the R workflow functions. 
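For orientation alongside the workflow table that follows, here is a minimal usage sketch (an illustration only, not part of the generated documentation; it assumes a data frame df with a binary column "target" and uses the civis_ml() arguments documented above):

    # generic entry point, selecting a workflow by name
    m1 <- civis_ml(df, dependent_variable = "target",
                   model_type = "random_forest_classifier")
    # the same workflow called directly through its R workflow function
    m2 <- civis_ml_random_forest_classifier(df, dependent_variable = "target")
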
\tabular{rrrrr}{ Name \tab R Workflow \tab Model Type \tab Algorithm \tab Altered Defaults \cr - \code{sparse_logistic} \tab \code{\link{civis_ml_sparse_logistic}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html}{LogisticRegression} \tab \code{C=499999950, tol=1e-08} \cr - \code{gradient_boosting_classifier} \tab \code{\link{civis_ml_gradient_boosting_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html}{GradientBoostingClassifier} \tab \code{n_estimators=500, max_depth=2} \cr - \code{random_forest_classifier} \tab \code{\link{civis_ml_random_forest_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html}{RandomForestClassifier} \tab \code{n_estimators=500} \cr - \code{extra_trees_classifier} \tab \code{\link{civis_ml_extra_trees_classifier}} \tab classification \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html}{ExtraTreesClassifier} \tab \code{n_estimators=500} \cr + \code{sparse_logistic} \tab \code{\link{civis_ml_sparse_logistic}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html}{LogisticRegression} \tab \code{C=499999950, tol=1e-08} \cr + \code{gradient_boosting_classifier} \tab \code{\link{civis_ml_gradient_boosting_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html}{GradientBoostingClassifier} \tab \code{n_estimators=500, max_depth=2} \cr + \code{random_forest_classifier} \tab \code{\link{civis_ml_random_forest_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html}{RandomForestClassifier} \tab \code{n_estimators=500} \cr + \code{extra_trees_classifier} \tab \code{\link{civis_ml_extra_trees_classifier}} \tab classification \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html}{ExtraTreesClassifier} \tab \code{n_estimators=500} \cr \code{multilayer_perceptron_classifier} \tab \tab classification \tab \href{https://github.com/civisanalytics/muffnn}{muffnn.MLPClassifier} \tab \cr \code{stacking_classifier} \tab \tab classification \tab \href{https://github.com/civisanalytics/civisml-extensions}{StackedClassifier}\tab \cr - \code{sparse_linear_regressor} \tab \code{\link{civis_ml_sparse_linear_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html}{LinearRegression} \tab \cr - \code{sparse_ridge_regressor} \tab \code{\link{civis_ml_sparse_ridge_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html}{Ridge} \tab \cr - \code{gradient_boosting_regressor} \tab \code{\link{civis_ml_gradient_boosting_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html}{GradientBoostingRegressor} \tab \code{n_estimators=500, max_depth=2} \cr - \code{random_forest_regressor} \tab \code{\link{civis_ml_random_forest_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html}{RandomForestRegressor} \tab \code{n_estimators=500} \cr 
- \code{extra_trees_regressor} \tab \code{\link{civis_ml_extra_trees_regressor}} \tab regression \tab \href{http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html}{ExtraTreesRegressor} \tab \code{n_estimators=500} \cr + \code{sparse_linear_regressor} \tab \code{\link{civis_ml_sparse_linear_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html}{LinearRegression} \tab \cr + \code{sparse_ridge_regressor} \tab \code{\link{civis_ml_sparse_ridge_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html}{Ridge} \tab \cr + \code{gradient_boosting_regressor} \tab \code{\link{civis_ml_gradient_boosting_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html}{GradientBoostingRegressor} \tab \code{n_estimators=500, max_depth=2} \cr + \code{random_forest_regressor} \tab \code{\link{civis_ml_random_forest_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html}{RandomForestRegressor} \tab \code{n_estimators=500} \cr + \code{extra_trees_regressor} \tab \code{\link{civis_ml_extra_trees_regressor}} \tab regression \tab \href{https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html}{ExtraTreesRegressor} \tab \code{n_estimators=500} \cr \code{multilayer_perceptron_regressor} \tab \tab regression \tab \href{https://github.com/civisanalytics/muffnn}{muffnn.MLPRegressor} \tab \cr \code{stacking_regressor} \tab \tab regression \tab \href{https://github.com/civisanalytics/civisml-extensions}{StackedRegressor}\tab \cr } @@ -231,9 +231,9 @@ The \code{"stacking_classifier"} model stacks together the \code{"gradient_boost \code{"random_forest_classifier"} predefined models together with a \code{glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss')}. Defaults for the predefined models are documented in \code{?civis_ml}. Each column is first -\href{http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html}{standardized}, +\href{https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html}{standardized}, and then the model predictions are combined using -\href{http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html}{LogisticRegressionCV} +\href{https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html}{LogisticRegressionCV} with \code{penalty='l2'} and \code{tol=1e-08}. 
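The stacking description above corresponds to the "stacking_classifier" entry in the workflow table. A rough sketch of requesting it and scoring new rows (an assumption-laden illustration; new_df is a hypothetical data frame, and it assumes the package's predict method accepts the newdata argument documented for this function):

    # fit the stacked classifier described above
    fit <- civis_ml(df, dependent_variable = "target",
                    model_type = "stacking_classifier")
    # score held-out observations; new_df is hypothetical
    preds <- predict(fit, newdata = new_df)
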
The \code{"stacking_regressor"} works similarly, stacking together the \code{"gradient_boosting_regressor"} and \code{"random_forest_regressor"} models and a \code{glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')}, combining them using diff --git a/man/clusters_get_kubernetes.Rd b/man/clusters_get_kubernetes.Rd index 7b32c665..b2e14f11 100644 --- a/man/clusters_get_kubernetes.Rd +++ b/man/clusters_get_kubernetes.Rd @@ -17,6 +17,8 @@ A list containing the following elements: \item{organizationId}{string, The id of this cluster's organization.} \item{organizationName}{string, The name of this cluster's organization.} \item{organizationSlug}{string, The slug of this cluster's organization.} +\item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +\item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} \item{clusterPartitions}{array, An array containing the following fields: \itemize{ \item clusterPartitionId integer, The ID of this cluster partition. @@ -26,7 +28,7 @@ A list containing the following elements: \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. }} \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -\item{hours}{number, The number of hours used this month for this cluster } +\item{hours}{number, The number of hours used this month for this cluster.} } \description{ Describe a Kubernetes Cluster diff --git a/man/clusters_get_kubernetes_instance_configs.Rd b/man/clusters_get_kubernetes_instance_configs.Rd index 29837b2f..8f68948f 100644 --- a/man/clusters_get_kubernetes_instance_configs.Rd +++ b/man/clusters_get_kubernetes_instance_configs.Rd @@ -17,7 +17,7 @@ clusters_get_kubernetes_instance_configs( \value{ A list containing the following elements: \item{instanceConfigId}{integer, The ID of this InstanceConfig.} -\item{instanceType}{string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge.} +\item{instanceType}{string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge.} \item{minInstances}{integer, The minimum number of instances of that type in this cluster.} \item{maxInstances}{integer, The maximum number of instances of that type in this cluster.} \item{instanceMaxMemory}{integer, The amount of memory (RAM) available to a single instance of that type in megabytes.} @@ -32,6 +32,8 @@ A list containing the following elements: \item pendingDeployments integer, The number of pending deployments in this instance config. \item runningDeployments integer, The number of running deployments in this instance config. }} +\item{clusterPartitionId}{integer, The ID of this InstanceConfig's cluster partition} +\item{clusterPartitionName}{string, The name of this InstanceConfig's cluster partition} } \description{ Describe an Instance Config diff --git a/man/clusters_get_kubernetes_partitions.Rd b/man/clusters_get_kubernetes_partitions.Rd index 92465f2c..4f35e53a 100644 --- a/man/clusters_get_kubernetes_partitions.Rd +++ b/man/clusters_get_kubernetes_partitions.Rd @@ -25,7 +25,7 @@ A list containing the following elements: \item{instanceConfigs}{array, An array containing the following fields: \itemize{ \item instanceConfigId integer, The ID of this InstanceConfig. -\item instanceType string, An EC2 instance type. 
Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. \item minInstances integer, The minimum number of instances of that type in this cluster. \item maxInstances integer, The maximum number of instances of that type in this cluster. \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. diff --git a/man/clusters_list_kubernetes.Rd b/man/clusters_list_kubernetes.Rd index cd868851..a226182e 100644 --- a/man/clusters_list_kubernetes.Rd +++ b/man/clusters_list_kubernetes.Rd @@ -5,7 +5,10 @@ \title{List Kubernetes Clusters} \usage{ clusters_list_kubernetes( + organization_id = NULL, organization_slug = NULL, + raw_cluster_slug = NULL, + exclude_inactive_orgs = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -13,7 +16,13 @@ clusters_list_kubernetes( ) } \arguments{ -\item{organization_slug}{string optional. The slug of this cluster's organization.} +\item{organization_id}{integer optional. The ID of this cluster's organization. Cannot be used along with the organization slug.} + +\item{organization_slug}{string optional. The slug of this cluster's organization. Cannot be used along with the organization ID.} + +\item{raw_cluster_slug}{string optional. The slug of this cluster's raw configuration.} + +\item{exclude_inactive_orgs}{boolean optional. When true, excludes KubeClusters associated with inactive orgs. Defaults to false.} \item{limit}{integer optional. Number of results to return. Defaults to its maximum of 50.} @@ -29,6 +38,8 @@ An array containing the following fields: \item{organizationId}{string, The id of this cluster's organization.} \item{organizationName}{string, The name of this cluster's organization.} \item{organizationSlug}{string, The slug of this cluster's organization.} +\item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +\item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} \item{clusterPartitions}{array, An array containing the following fields: \itemize{ \item clusterPartitionId integer, The ID of this cluster partition. diff --git a/man/clusters_list_kubernetes_deployments.Rd b/man/clusters_list_kubernetes_deployments.Rd index a2308a6d..52c34017 100644 --- a/man/clusters_list_kubernetes_deployments.Rd +++ b/man/clusters_list_kubernetes_deployments.Rd @@ -48,6 +48,8 @@ An array containing the following fields: \item initials string, This user's initials. \item online boolean, Whether this user is online. 
}} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/clusters_list_kubernetes_instance_configs_active_workloads.Rd b/man/clusters_list_kubernetes_instance_configs_active_workloads.Rd new file mode 100644 index 00000000..a9c48572 --- /dev/null +++ b/man/clusters_list_kubernetes_instance_configs_active_workloads.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{clusters_list_kubernetes_instance_configs_active_workloads} +\alias{clusters_list_kubernetes_instance_configs_active_workloads} +\title{List active workloads in an Instance Config} +\usage{ +clusters_list_kubernetes_instance_configs_active_workloads(id, state = NULL) +} +\arguments{ +\item{id}{integer required. The id of the instance config.} + +\item{state}{string optional. If specified, return workloads in these states. It accepts a comma-separated list, possible values are pending, running} +} +\value{ +An array containing the following fields: +\item{id}{integer, The id of this deployment.} +\item{baseType}{string, The base type of this deployment.} +\item{baseId}{integer, The id of the base object associated with this deployment.} +\item{baseObjectName}{string, The name of the base object associated with this deployment. Null if you do not have permission to read the object.} +\item{jobType}{string, If the base object is a job run you have permission to read, the type of the job. One of "python_script", "r_script", "container_script", or "custom_script".} +\item{jobId}{integer, If the base object is a job run you have permission to read, the id of the job.} +\item{jobCancelRequestedAt}{string, If the base object is a job run you have permission to read, and it was requested to be cancelled, the timestamp of that request.} +\item{state}{string, The state of this deployment.} +\item{cpu}{integer, The CPU in millicores requested by this deployment.} +\item{memory}{integer, The memory in MB requested by this deployment.} +\item{diskSpace}{integer, The disk space in GB requested by this deployment.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, The timestamp of when the deployment began.} +\item{cancellable}{boolean, True if you have permission to cancel this deployment.} +} +\description{ +List active workloads in an Instance Config +} diff --git a/man/clusters_list_kubernetes_partitions.Rd b/man/clusters_list_kubernetes_partitions.Rd index 09dd9c84..38cb6377 100644 --- a/man/clusters_list_kubernetes_partitions.Rd +++ b/man/clusters_list_kubernetes_partitions.Rd @@ -19,7 +19,7 @@ An array containing the following fields: \item{instanceConfigs}{array, An array containing the following fields: \itemize{ \item instanceConfigId integer, The ID of this InstanceConfig. -\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. 
Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. \item minInstances integer, The minimum number of instances of that type in this cluster. \item maxInstances integer, The maximum number of instances of that type in this cluster. \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. diff --git a/man/clusters_patch_kubernetes.Rd b/man/clusters_patch_kubernetes.Rd index 4a280a9c..2084125e 100644 --- a/man/clusters_patch_kubernetes.Rd +++ b/man/clusters_patch_kubernetes.Rd @@ -4,11 +4,13 @@ \alias{clusters_patch_kubernetes} \title{Update a Kubernetes Cluster} \usage{ -clusters_patch_kubernetes(id, is_nat_enabled = NULL) +clusters_patch_kubernetes(id, raw_cluster_slug = NULL, is_nat_enabled = NULL) } \arguments{ \item{id}{integer required. The ID of this cluster.} +\item{raw_cluster_slug}{string optional. The slug of this cluster's raw configuration.} + \item{is_nat_enabled}{boolean optional. Whether this cluster needs a NAT gateway or not.} } \value{ @@ -17,6 +19,8 @@ A list containing the following elements: \item{organizationId}{string, The id of this cluster's organization.} \item{organizationName}{string, The name of this cluster's organization.} \item{organizationSlug}{string, The slug of this cluster's organization.} +\item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +\item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} \item{clusterPartitions}{array, An array containing the following fields: \itemize{ \item clusterPartitionId integer, The ID of this cluster partition. @@ -26,7 +30,7 @@ A list containing the following elements: \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. }} \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -\item{hours}{number, The number of hours used this month for this cluster } +\item{hours}{number, The number of hours used this month for this cluster.} } \description{ Update a Kubernetes Cluster diff --git a/man/clusters_patch_kubernetes_partitions.Rd b/man/clusters_patch_kubernetes_partitions.Rd index cb440077..fd7a5309 100644 --- a/man/clusters_patch_kubernetes_partitions.Rd +++ b/man/clusters_patch_kubernetes_partitions.Rd @@ -19,7 +19,7 @@ clusters_patch_kubernetes_partitions( \item{instance_configs}{array optional. An array containing the following fields: \itemize{ -\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. \item minInstances integer, The minimum number of instances of that type in this cluster. \item maxInstances integer, The maximum number of instances of that type in this cluster. }} @@ -36,7 +36,7 @@ A list containing the following elements: \item{instanceConfigs}{array, An array containing the following fields: \itemize{ \item instanceConfigId integer, The ID of this InstanceConfig. -\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. 
\item minInstances integer, The minimum number of instances of that type in this cluster. \item maxInstances integer, The maximum number of instances of that type in this cluster. \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. diff --git a/man/clusters_post_kubernetes.Rd b/man/clusters_post_kubernetes.Rd index a1c78358..482d217e 100644 --- a/man/clusters_post_kubernetes.Rd +++ b/man/clusters_post_kubernetes.Rd @@ -7,6 +7,7 @@ clusters_post_kubernetes( organization_id = NULL, organization_slug = NULL, + raw_cluster_slug = NULL, is_nat_enabled = NULL ) } @@ -15,6 +16,8 @@ clusters_post_kubernetes( \item{organization_slug}{string optional. The slug of this cluster's organization.} +\item{raw_cluster_slug}{string optional. The slug of this cluster's raw configuration.} + \item{is_nat_enabled}{boolean optional. Whether this cluster needs a NAT gateway or not.} } \value{ @@ -23,6 +26,8 @@ A list containing the following elements: \item{organizationId}{string, The id of this cluster's organization.} \item{organizationName}{string, The name of this cluster's organization.} \item{organizationSlug}{string, The slug of this cluster's organization.} +\item{rawClusterSlug}{string, The slug of this cluster's raw configuration.} +\item{customPartitions}{boolean, Whether this cluster has a custom partition configuration.} \item{clusterPartitions}{array, An array containing the following fields: \itemize{ \item clusterPartitionId integer, The ID of this cluster partition. @@ -32,7 +37,7 @@ A list containing the following elements: \item defaultInstanceConfigId integer, The id of the InstanceConfig that is the default for this partition. }} \item{isNatEnabled}{boolean, Whether this cluster needs a NAT gateway or not.} -\item{hours}{number, The number of hours used this month for this cluster } +\item{hours}{number, The number of hours used this month for this cluster.} } \description{ Create a Kubernetes Cluster diff --git a/man/clusters_post_kubernetes_partitions.Rd b/man/clusters_post_kubernetes_partitions.Rd index 710a3791..ee5d6f74 100644 --- a/man/clusters_post_kubernetes_partitions.Rd +++ b/man/clusters_post_kubernetes_partitions.Rd @@ -11,7 +11,7 @@ clusters_post_kubernetes_partitions(id, instance_configs, name, labels) \item{instance_configs}{array required. An array containing the following fields: \itemize{ -\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. \item minInstances integer, The minimum number of instances of that type in this cluster. \item maxInstances integer, The maximum number of instances of that type in this cluster. }} @@ -28,7 +28,7 @@ A list containing the following elements: \item{instanceConfigs}{array, An array containing the following fields: \itemize{ \item instanceConfigId integer, The ID of this InstanceConfig. -\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and p2.xlarge. +\item instanceType string, An EC2 instance type. Possible values include t2.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m5.12xlarge, and c5.18xlarge. \item minInstances integer, The minimum number of instances of that type in this cluster. 
\item maxInstances integer, The maximum number of instances of that type in this cluster. \item instanceMaxMemory integer, The amount of memory (RAM) available to a single instance of that type in megabytes. diff --git a/man/credentials_delete.Rd b/man/credentials_delete.Rd new file mode 100644 index 00000000..ca4a2c7b --- /dev/null +++ b/man/credentials_delete.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{credentials_delete} +\alias{credentials_delete} +\title{Delete a credential} +\usage{ +credentials_delete(id) +} +\arguments{ +\item{id}{integer required. The ID of the credential.} +} +\value{ +An empty HTTP response +} +\description{ +Delete a credential +} diff --git a/man/credentials_get.Rd b/man/credentials_get.Rd index 9d3d91ee..eb92c15c 100644 --- a/man/credentials_get.Rd +++ b/man/credentials_get.Rd @@ -16,12 +16,21 @@ A list containing the following elements: \item{type}{string, The credential's type.} \item{username}{string, The username for the credential.} \item{description}{string, A long description of the credential.} -\item{owner}{string, The name of the user who this credential belongs to.} +\item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} \item{remoteHostName}{string, The name of the remote host associated with this credential.} \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} \item{createdAt}{string, The creation time for this credential.} \item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} } \description{ Get a credential diff --git a/man/credentials_list.Rd b/man/credentials_list.Rd index 684da77d..fbe805d2 100644 --- a/man/credentials_list.Rd +++ b/man/credentials_list.Rd @@ -8,6 +8,9 @@ credentials_list( type = NULL, remote_host_id = NULL, default = NULL, + system_credentials = NULL, + users = NULL, + name = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -21,6 +24,12 @@ credentials_list( \item{default}{boolean optional. If true, will return a list with a single credential which is the current user's default credential.} +\item{system_credentials}{boolean optional. If true, will only return system credentials. System credentials can only be created and viewed by Civis Admins.} + +\item{users}{string optional. A comma-separated list of user ids. If specified, returns set of credentials owned by the users that requesting user has at least read access on.} + +\item{name}{string optional. If specified, will be used to filter the credentials returned. Will search across name and will return any full name containing the search string.} + \item{limit}{integer optional. Number of results to return. Defaults to its maximum of 1000.} \item{page_num}{integer optional. Page number of the results to return. 
Defaults to the first page, 1.} @@ -36,12 +45,21 @@ An array containing the following fields: \item{type}{string, The credential's type.} \item{username}{string, The username for the credential.} \item{description}{string, A long description of the credential.} -\item{owner}{string, The name of the user who this credential belongs to.} +\item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} \item{remoteHostName}{string, The name of the remote host associated with this credential.} \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} \item{createdAt}{string, The creation time for this credential.} \item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} } \description{ List credentials diff --git a/man/credentials_list_dependencies.Rd b/man/credentials_list_dependencies.Rd new file mode 100644 index 00000000..23545546 --- /dev/null +++ b/man/credentials_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{credentials_list_dependencies} +\alias{credentials_list_dependencies} +\title{List dependent objects for this object} +\usage{ +credentials_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/credentials_list_types.Rd b/man/credentials_list_types.Rd new file mode 100644 index 00000000..a2aec5a6 --- /dev/null +++ b/man/credentials_list_types.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{credentials_list_types} +\alias{credentials_list_types} +\title{Get list of Credential Types} +\usage{ +credentials_list_types() +} +\value{ +A list containing the following elements: +\item{types}{array, list of acceptable credential types} +} +\description{ +Get list of Credential Types +} diff --git a/man/credentials_patch.Rd b/man/credentials_patch.Rd new file mode 100644 index 00000000..eb156e83 --- /dev/null +++ b/man/credentials_patch.Rd @@ -0,0 +1,69 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{credentials_patch} +\alias{credentials_patch} +\title{Update some attributes of a credential} +\usage{ +credentials_patch( + id, + name = NULL, + type = NULL, + description = NULL, + username = NULL, + password = NULL, + remote_host_id = NULL, + user_id = NULL, + state = NULL, + system_credential = NULL, + default = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the credential.} + +\item{name}{string optional. The name identifying the credential.} + +\item{type}{string optional. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"]} + +\item{description}{string optional. A long description of the credential.} + +\item{username}{string optional. The username for the credential.} + +\item{password}{string optional. The password for the credential.} + +\item{remote_host_id}{integer optional. The ID of the remote host associated with the credential.} + +\item{user_id}{integer optional. The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User.} + +\item{state}{string optional. The U.S. state for the credential. Only for VAN credentials.} + +\item{system_credential}{boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user.} + +\item{default}{boolean optional. Whether or not the credential is a default. Only for Database credentials.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the credential.} +\item{name}{string, The name identifying the credential} +\item{type}{string, The credential's type.} +\item{username}{string, The username for the credential.} +\item{description}{string, A long description of the credential.} +\item{owner}{string, The username of the user who this credential belongs to. 
Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{remoteHostId}{integer, The ID of the remote host associated with this credential.} +\item{remoteHostName}{string, The name of the remote host associated with this credential.} +\item{state}{string, The U.S. state for the credential. Only for VAN credentials.} +\item{createdAt}{string, The creation time for this credential.} +\item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} +} +\description{ +Update some attributes of a credential +} diff --git a/man/credentials_post.Rd b/man/credentials_post.Rd index ae6f5e44..d806dadd 100644 --- a/man/credentials_post.Rd +++ b/man/credentials_post.Rd @@ -11,12 +11,14 @@ credentials_post( name = NULL, description = NULL, remote_host_id = NULL, + user_id = NULL, state = NULL, - system_credential = NULL + system_credential = NULL, + default = NULL ) } \arguments{ -\item{type}{string required.} +\item{type}{string required. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"]} \item{username}{string required. The username for the credential.} @@ -28,9 +30,13 @@ credentials_post( \item{remote_host_id}{integer optional. The ID of the remote host associated with the credential.} +\item{user_id}{integer optional. The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User.} + \item{state}{string optional. The U.S. state for the credential. Only for VAN credentials.} -\item{system_credential}{boolean optional.} +\item{system_credential}{boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user.} + +\item{default}{boolean optional. Whether or not the credential is a default. Only for Database credentials.} } \value{ A list containing the following elements: @@ -39,12 +45,21 @@ A list containing the following elements: \item{type}{string, The credential's type.} \item{username}{string, The username for the credential.} \item{description}{string, A long description of the credential.} -\item{owner}{string, The name of the user who this credential belongs to.} +\item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} \item{remoteHostName}{string, The name of the remote host associated with this credential.} \item{state}{string, The U.S. state for the credential. 
Only for VAN credentials.} \item{createdAt}{string, The creation time for this credential.} \item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} } \description{ Create a credential diff --git a/man/credentials_post_authenticate.Rd b/man/credentials_post_authenticate.Rd index 1b594d30..945f2401 100644 --- a/man/credentials_post_authenticate.Rd +++ b/man/credentials_post_authenticate.Rd @@ -9,7 +9,7 @@ credentials_post_authenticate(url, remote_host_type, username, password) \arguments{ \item{url}{string required. The URL to your host.} -\item{remote_host_type}{string required. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{remote_host_type}{string required. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} \item{username}{string required. The username for the credential.} @@ -22,12 +22,21 @@ A list containing the following elements: \item{type}{string, The credential's type.} \item{username}{string, The username for the credential.} \item{description}{string, A long description of the credential.} -\item{owner}{string, The name of the user who this credential belongs to.} +\item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} \item{remoteHostName}{string, The name of the remote host associated with this credential.} \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} \item{createdAt}{string, The creation time for this credential.} \item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} } \description{ Authenticate against a remote host diff --git a/man/credentials_put.Rd b/man/credentials_put.Rd index d878266a..e4fdc764 100644 --- a/man/credentials_put.Rd +++ b/man/credentials_put.Rd @@ -12,14 +12,16 @@ credentials_put( name = NULL, description = NULL, remote_host_id = NULL, + user_id = NULL, state = NULL, - system_credential = NULL + system_credential = NULL, + default = NULL ) } \arguments{ \item{id}{integer required. The ID of the credential.} -\item{type}{string required.} +\item{type}{string required. The type of credential. Note: only these credentials can be created or edited via this API ["Amazon Web Services S3", "CASS/NCOA PAF", "Certificate", "Civis Platform", "Custom", "Database", "Google", "Salesforce User", "Salesforce Client", "TableauUser"]} \item{username}{string required. The username for the credential.} @@ -31,9 +33,13 @@ credentials_put( \item{remote_host_id}{integer optional. 
The ID of the remote host associated with the credential.} +\item{user_id}{integer optional. The ID of the user the credential is created for. Note: This attribute is only accepted if you are a Civis Admin User.} + \item{state}{string optional. The U.S. state for the credential. Only for VAN credentials.} -\item{system_credential}{boolean optional.} +\item{system_credential}{boolean optional. Boolean flag that sets a credential to be a system credential. System credentials can only be created by Civis Admins and will create a credential owned by the Civis Robot user.} + +\item{default}{boolean optional. Whether or not the credential is a default. Only for Database credentials.} } \value{ A list containing the following elements: @@ -42,12 +48,21 @@ A list containing the following elements: \item{type}{string, The credential's type.} \item{username}{string, The username for the credential.} \item{description}{string, A long description of the credential.} -\item{owner}{string, The name of the user who this credential belongs to.} +\item{owner}{string, The username of the user who this credential belongs to. Using user.username is preferred.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{remoteHostId}{integer, The ID of the remote host associated with this credential.} \item{remoteHostName}{string, The name of the remote host associated with this credential.} \item{state}{string, The U.S. state for the credential. Only for VAN credentials.} \item{createdAt}{string, The creation time for this credential.} \item{updatedAt}{string, The last modification time for this credential.} +\item{default}{boolean, Whether or not the credential is a default. Only for Database credentials.} } \description{ Update an existing credential diff --git a/man/credentials_put_transfer.Rd b/man/credentials_put_transfer.Rd new file mode 100644 index 00000000..7abb89a9 --- /dev/null +++ b/man/credentials_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{credentials_put_transfer} +\alias{credentials_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +credentials_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/databases_delete_whitelist_ips.Rd b/man/databases_delete_whitelist_ips.Rd deleted file mode 100644 index 3d721761..00000000 --- a/man/databases_delete_whitelist_ips.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{databases_delete_whitelist_ips} -\alias{databases_delete_whitelist_ips} -\title{Remove a whitelisted IP address} -\usage{ -databases_delete_whitelist_ips(id, whitelisted_ip_id) -} -\arguments{ -\item{id}{integer required. The ID of the database this rule is applied to.} - -\item{whitelisted_ip_id}{integer required. The ID of this whitelisted IP address.} -} -\value{ -An empty HTTP response -} -\description{ -Remove a whitelisted IP address -} diff --git a/man/databases_get_schema_privileges.Rd b/man/databases_get_schema_privileges.Rd new file mode 100644 index 00000000..38adc276 --- /dev/null +++ b/man/databases_get_schema_privileges.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{databases_get_schema_privileges} +\alias{databases_get_schema_privileges} +\title{Show schema privileges} +\usage{ +databases_get_schema_privileges(id, schema_name) +} +\arguments{ +\item{id}{integer required. The ID of the database} + +\item{schema_name}{string required. The name of the schema} +} +\value{ +A list containing the following elements: +\item{grantee}{string, Name of the granted user or group} +\item{granteeType}{string, User or group} +\item{privileges}{array, Privileges that the grantee has on this resource} +\item{grantablePrivileges}{array, Privileges that the grantee can grant to others for this resource} +} +\description{ +Show schema privileges +} diff --git a/man/databases_get_table_privilegesschema_name.Rd b/man/databases_get_table_privilegesschema_name.Rd new file mode 100644 index 00000000..32f8cef3 --- /dev/null +++ b/man/databases_get_table_privilegesschema_name.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{databases_get_table_privilegesschema_name} +\alias{databases_get_table_privilegesschema_name} +\title{Show table privileges} +\usage{ +databases_get_table_privilegesschema_name(id, schema_name, table_name) +} +\arguments{ +\item{id}{integer required. The ID of the database} + +\item{schema_name}{string required. The name of the schema} + +\item{table_name}{string required. 
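A short sketch of the two new privilege lookups documented here; the database ID, schema name, and table name are placeholders.

    library(civis)
    schema_priv <- databases_get_schema_privileges(id = 32, schema_name = "public")
    table_priv  <- databases_get_table_privilegesschema_name(
      id = 32, schema_name = "public", table_name = "customers"
    )
    # Privileges the grantee could pass on to others, per the return fields above.
    table_priv$grantablePrivileges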
The name of the table} +} +\value{ +A list containing the following elements: +\item{grantee}{string, Name of the granted user or group} +\item{granteeType}{string, User or group} +\item{privileges}{array, Privileges that the grantee has on this resource} +\item{grantablePrivileges}{array, Privileges that the grantee can grant to others for this resource} +} +\description{ +Show table privileges +} diff --git a/man/databases_list_schemas.Rd b/man/databases_list_schemas.Rd index 7212552f..13d474db 100644 --- a/man/databases_list_schemas.Rd +++ b/man/databases_list_schemas.Rd @@ -4,10 +4,14 @@ \alias{databases_list_schemas} \title{List schemas in this database} \usage{ -databases_list_schemas(id) +databases_list_schemas(id, name = NULL, credential_id = NULL) } \arguments{ \item{id}{integer required. The ID of the database.} + +\item{name}{string optional. If specified, will be used to filter the schemas returned. Substring matching is supported (e.g., "name=schema" will return both "schema1" and "schema2").} + +\item{credential_id}{integer optional. If provided, schemas will be filtered based on the given credential.} } \value{ An array containing the following fields: diff --git a/man/databases_list_tables.Rd b/man/databases_list_tables.Rd index 5cfa74b0..d5c869e6 100644 --- a/man/databases_list_tables.Rd +++ b/man/databases_list_tables.Rd @@ -52,6 +52,11 @@ An array containing the following fields: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{tableTags}{array, An array containing the following fields: +\itemize{ +\item id integer, Table Tag ID +\item name string, Table Tag Name +}} } \description{ List tables in the specified database, deprecated use "GET /tables" instead diff --git a/man/databases_list_tables_search.Rd b/man/databases_list_tables_search.Rd index e78f08cd..6e7f07b9 100644 --- a/man/databases_list_tables_search.Rd +++ b/man/databases_list_tables_search.Rd @@ -39,6 +39,11 @@ An array containing the following fields: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{tableTags}{array, An array containing the following fields: +\itemize{ +\item id integer, Table Tag ID +\item name string, Table Tag Name +}} \item{columnNames}{array, The names of each column in the table.} } \description{ diff --git a/man/databases_post_whitelist_ips.Rd b/man/databases_post_whitelist_ips.Rd deleted file mode 100644 index 60c18719..00000000 --- a/man/databases_post_whitelist_ips.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generated_client.R -\name{databases_post_whitelist_ips} -\alias{databases_post_whitelist_ips} -\title{Whitelist an IP address} -\usage{ -databases_post_whitelist_ips(id, subnet_mask) -} -\arguments{ -\item{id}{integer required. The ID of the database this rule is applied to.} - -\item{subnet_mask}{string required. 
The subnet mask that is allowed by this rule.} -} -\value{ -A list containing the following elements: -\item{id}{integer, The ID of this whitelisted IP address.} -\item{remoteHostId}{integer, The ID of the database this rule is applied to.} -\item{securityGroupId}{string, The ID of the security group this rule is applied to.} -\item{subnetMask}{string, The subnet mask that is allowed by this rule.} -\item{authorizedBy}{string, The user who authorized this rule.} -\item{isActive}{boolean, True if the rule is applied, false if it has been revoked.} -\item{createdAt}{string, The time this rule was created.} -\item{updatedAt}{string, The time this rule was last updated.} -} -\description{ -Whitelist an IP address -} diff --git a/man/enhancements_get_cass_ncoa.Rd b/man/enhancements_get_cass_ncoa.Rd index 7f7fd7f8..273a56d1 100644 --- a/man/enhancements_get_cass_ncoa.Rd +++ b/man/enhancements_get_cass_ncoa.Rd @@ -28,10 +28,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -55,6 +56,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{source}{list, A list containing the following elements: \itemize{ \item databaseTable list . A list containing the following elements: @@ -89,6 +91,7 @@ A list containing the following elements: \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +\item{chunkSize}{integer, The maximum number of records processed at a time. 
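Assuming an existing CASS/NCOA enhancement ID and that the getter takes only that ID, the new chunkSize and day-of-month schedule fields returned above can be inspected like this (the ID is a placeholder):

    library(civis)
    job <- enhancements_get_cass_ncoa(id = 1234)
    job$chunkSize                       # maximum records processed at a time
    job$schedule$scheduledDaysOfMonth   # mutually exclusive with scheduledDays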
Note that this parameter is not available to all users.} \item{archived}{string, The archival status of the requested item(s).} } \description{ diff --git a/man/enhancements_get_cass_ncoa_runs.Rd b/man/enhancements_get_cass_ncoa_runs.Rd index da0c2d1c..5339d256 100644 --- a/man/enhancements_get_cass_ncoa_runs.Rd +++ b/man/enhancements_get_cass_ncoa_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{cassNcoaId}{integer, The ID of the cass_ncoa.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_get_civis_data_match.Rd b/man/enhancements_get_civis_data_match.Rd index e3db7ae6..ca6415cb 100644 --- a/man/enhancements_get_civis_data_match.Rd +++ b/man/enhancements_get_civis_data_match.Rd @@ -28,10 +28,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -55,7 +56,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -70,7 +72,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). 
Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_get_civis_data_match_runs.Rd b/man/enhancements_get_civis_data_match_runs.Rd index 0a634277..f5430ae8 100644 --- a/man/enhancements_get_civis_data_match_runs.Rd +++ b/man/enhancements_get_civis_data_match_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{civisDataMatchId}{integer, The ID of the civis_data_match.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_get_geocode.Rd b/man/enhancements_get_geocode.Rd index d6d7fbd2..4741343b 100644 --- a/man/enhancements_get_geocode.Rd +++ b/man/enhancements_get_geocode.Rd @@ -28,10 +28,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -55,6 +56,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{remoteHostId}{integer, The ID of the remote host.} \item{credentialId}{integer, The ID of the remote host credential.} \item{sourceSchemaAndTable}{string, The source database schema and table.} diff --git a/man/enhancements_get_geocode_runs.Rd b/man/enhancements_get_geocode_runs.Rd index d6b0cdf1..f5d3b43a 100644 --- a/man/enhancements_get_geocode_runs.Rd +++ b/man/enhancements_get_geocode_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{geocodeId}{integer, The ID of the geocode.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_list.Rd b/man/enhancements_list.Rd index d4c33f58..38037ead 100644 --- a/man/enhancements_list.Rd +++ b/man/enhancements_list.Rd @@ -18,7 +18,7 @@ enhancements_list( \arguments{ \item{type}{string optional. If specified, return items of these types.} -\item{author}{string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{status}{string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} diff --git a/man/enhancements_list_cass_ncoa_dependencies.Rd b/man/enhancements_list_cass_ncoa_dependencies.Rd new file mode 100644 index 00000000..6075d5f3 --- /dev/null +++ b/man/enhancements_list_cass_ncoa_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_list_cass_ncoa_dependencies} +\alias{enhancements_list_cass_ncoa_dependencies} +\title{List dependent objects for this object} +\usage{ +enhancements_list_cass_ncoa_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
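A sketch tying together the updated author filter and the new dependency listing; the user and enhancement IDs are placeholders, and the response handling assumes the usual list-of-lists shape of the generated client.

    library(civis)
    # `author` now accepts a comma-separated list of user IDs.
    jobs <- enhancements_list(type = "cass_ncoa", author = "123,456")
    deps <- enhancements_list_cass_ncoa_dependencies(id = jobs[[1]]$id)
    vapply(deps, function(d) d$fcoType, character(1))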
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/enhancements_list_cass_ncoa_runs.Rd b/man/enhancements_list_cass_ncoa_runs.Rd index 641c2405..e38f927c 100644 --- a/man/enhancements_list_cass_ncoa_runs.Rd +++ b/man/enhancements_list_cass_ncoa_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{cassNcoaId}{integer, The ID of the cass_ncoa.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_list_civis_data_match_dependencies.Rd b/man/enhancements_list_civis_data_match_dependencies.Rd new file mode 100644 index 00000000..a0fb9492 --- /dev/null +++ b/man/enhancements_list_civis_data_match_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_list_civis_data_match_dependencies} +\alias{enhancements_list_civis_data_match_dependencies} +\title{List dependent objects for this object} +\usage{ +enhancements_list_civis_data_match_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/enhancements_list_civis_data_match_runs.Rd b/man/enhancements_list_civis_data_match_runs.Rd index e7b4e7bd..fa621c6e 100644 --- a/man/enhancements_list_civis_data_match_runs.Rd +++ b/man/enhancements_list_civis_data_match_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{civisDataMatchId}{integer, The ID of the civis_data_match.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_list_geocode_dependencies.Rd b/man/enhancements_list_geocode_dependencies.Rd new file mode 100644 index 00000000..494c11e1 --- /dev/null +++ b/man/enhancements_list_geocode_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_list_geocode_dependencies} +\alias{enhancements_list_geocode_dependencies} +\title{List dependent objects for this object} +\usage{ +enhancements_list_geocode_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/enhancements_list_geocode_runs.Rd b/man/enhancements_list_geocode_runs.Rd index b4d7fb57..f6ef669c 100644 --- a/man/enhancements_list_geocode_runs.Rd +++ b/man/enhancements_list_geocode_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{geocodeId}{integer, The ID of the geocode.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_patch_cass_ncoa.Rd b/man/enhancements_patch_cass_ncoa.Rd index fde4dc54..5e81190e 100644 --- a/man/enhancements_patch_cass_ncoa.Rd +++ b/man/enhancements_patch_cass_ncoa.Rd @@ -17,7 +17,8 @@ enhancements_patch_cass_ncoa( perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, - limiting_sql = NULL + limiting_sql = NULL, + chunk_size = NULL ) } \arguments{ @@ -28,10 +29,11 @@ enhancements_patch_cass_ncoa( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -91,6 +93,8 @@ enhancements_patch_cass_ncoa( \item{output_level}{string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limiting_sql}{string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} + +\item{chunk_size}{integer optional. The maximum number of records processed at a time. 
Note that this parameter is not available to all users.} } \value{ A list containing the following elements: @@ -111,10 +115,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -138,6 +143,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{source}{list, A list containing the following elements: \itemize{ \item databaseTable list . A list containing the following elements: @@ -172,6 +178,7 @@ A list containing the following elements: \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +\item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} \item{archived}{string, The archival status of the requested item(s).} } \description{ diff --git a/man/enhancements_patch_civis_data_match.Rd b/man/enhancements_patch_civis_data_match.Rd index 92eac468..f5c255b9 100644 --- a/man/enhancements_patch_civis_data_match.Rd +++ b/man/enhancements_patch_civis_data_match.Rd @@ -27,10 +27,11 @@ enhancements_patch_civis_data_match( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -49,7 +50,7 @@ enhancements_patch_civis_data_match( \item failureOn boolean, If failure email notifications are on. 
}} -\item{input_field_mapping}{list optional. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{input_field_mapping}{list optional. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{input_table}{list optional. A list containing the following elements: \itemize{ @@ -69,7 +70,7 @@ enhancements_patch_civis_data_match( \item{max_matches}{integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean optional. Whether the Civis Data Match Job has been archived.} } @@ -92,10 +93,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -119,7 +121,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -134,7 +137,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 
0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_patch_geocode.Rd b/man/enhancements_patch_geocode.Rd index 981fba07..d7fcea93 100644 --- a/man/enhancements_patch_geocode.Rd +++ b/man/enhancements_patch_geocode.Rd @@ -30,10 +30,11 @@ enhancements_patch_geocode( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -91,10 +92,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -118,6 +120,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{remoteHostId}{integer, The ID of the remote host.} \item{credentialId}{integer, The ID of the remote host credential.} \item{sourceSchemaAndTable}{string, The source database schema and table.} diff --git a/man/enhancements_post_cass_ncoa.Rd b/man/enhancements_post_cass_ncoa.Rd index 0f52fd06..17123448 100644 --- a/man/enhancements_post_cass_ncoa.Rd +++ b/man/enhancements_post_cass_ncoa.Rd @@ -16,7 +16,8 @@ enhancements_post_cass_ncoa( perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, - limiting_sql = NULL + limiting_sql = NULL, + chunk_size = NULL ) } \arguments{ @@ -37,10 +38,11 @@ enhancements_post_cass_ncoa( \item{schedule}{list optional. 
A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -88,6 +90,8 @@ enhancements_post_cass_ncoa( \item{output_level}{string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limiting_sql}{string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} + +\item{chunk_size}{integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users.} } \value{ A list containing the following elements: @@ -108,10 +112,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -135,6 +140,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{source}{list, A list containing the following elements: \itemize{ \item databaseTable list . A list containing the following elements: @@ -169,6 +175,7 @@ A list containing the following elements: \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +\item{chunkSize}{integer, The maximum number of records processed at a time. 
Note that this parameter is not available to all users.} \item{archived}{string, The archival status of the requested item(s).} } \description{ diff --git a/man/enhancements_post_cass_ncoa_runs.Rd b/man/enhancements_post_cass_ncoa_runs.Rd index 0b91e8c2..caee2a8c 100644 --- a/man/enhancements_post_cass_ncoa_runs.Rd +++ b/man/enhancements_post_cass_ncoa_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{cassNcoaId}{integer, The ID of the cass_ncoa.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_post_civis_data_match.Rd b/man/enhancements_post_civis_data_match.Rd index f66fdf8a..4663e721 100644 --- a/man/enhancements_post_civis_data_match.Rd +++ b/man/enhancements_post_civis_data_match.Rd @@ -21,7 +21,7 @@ enhancements_post_civis_data_match( \arguments{ \item{name}{string required. The name of the enhancement job.} -\item{input_field_mapping}{list required. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{input_field_mapping}{list required. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{input_table}{list required. A list containing the following elements: \itemize{ @@ -42,10 +42,11 @@ enhancements_post_civis_data_match( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -66,7 +67,7 @@ enhancements_post_civis_data_match( \item{max_matches}{integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean optional. 
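To illustrate the reworded input_field_mapping description: each field type maps to a single column name, and multi-valued field types such as "phone" take a list of column names. The field-type names below are illustrative; the authoritative list comes from the field-mapping endpoint referenced above.

    # Built as a plain named list and passed as `input_field_mapping` to
    # enhancements_post_civis_data_match() or enhancements_patch_civis_data_match().
    input_field_mapping <- list(
      first_name = "fname",
      last_name  = "lname",
      phone      = list("home_phone", "mobile_phone")
    )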
Whether the Civis Data Match Job has been archived.} } @@ -89,10 +90,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -116,7 +118,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -131,7 +134,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_post_civis_data_match_clone.Rd b/man/enhancements_post_civis_data_match_clone.Rd index 207b0591..d1a66b31 100644 --- a/man/enhancements_post_civis_data_match_clone.Rd +++ b/man/enhancements_post_civis_data_match_clone.Rd @@ -39,10 +39,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -66,7 +67,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -81,7 +83,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_post_civis_data_match_runs.Rd b/man/enhancements_post_civis_data_match_runs.Rd index 0b48505e..0a64772a 100644 --- a/man/enhancements_post_civis_data_match_runs.Rd +++ b/man/enhancements_post_civis_data_match_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{civisDataMatchId}{integer, The ID of the civis_data_match.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_post_geocode.Rd b/man/enhancements_post_geocode.Rd index 58eaaa2f..4b0cab50 100644 --- a/man/enhancements_post_geocode.Rd +++ b/man/enhancements_post_geocode.Rd @@ -33,10 +33,11 @@ enhancements_post_geocode( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -88,10 +89,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -115,6 +117,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{remoteHostId}{integer, The ID of the remote host.} \item{credentialId}{integer, The ID of the remote host credential.} \item{sourceSchemaAndTable}{string, The source database schema and table.} diff --git a/man/enhancements_post_geocode_runs.Rd b/man/enhancements_post_geocode_runs.Rd index f72a2c4f..52eeab8a 100644 --- a/man/enhancements_post_geocode_runs.Rd +++ b/man/enhancements_post_geocode_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{geocodeId}{integer, The ID of the geocode.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/enhancements_put_cass_ncoa.Rd b/man/enhancements_put_cass_ncoa.Rd index 8c71dca1..dcc4b033 100644 --- a/man/enhancements_put_cass_ncoa.Rd +++ b/man/enhancements_put_cass_ncoa.Rd @@ -17,7 +17,8 @@ enhancements_put_cass_ncoa( perform_ncoa = NULL, ncoa_credential_id = NULL, output_level = NULL, - limiting_sql = NULL + limiting_sql = NULL, + chunk_size = NULL ) } \arguments{ @@ -40,10 +41,11 @@ enhancements_put_cass_ncoa( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. 
-\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -91,6 +93,8 @@ enhancements_put_cass_ncoa( \item{output_level}{string optional. The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limiting_sql}{string optional. The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} + +\item{chunk_size}{integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users.} } \value{ A list containing the following elements: @@ -111,10 +115,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -138,6 +143,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{source}{list, A list containing the following elements: \itemize{ \item databaseTable list . A list containing the following elements: @@ -172,6 +178,7 @@ A list containing the following elements: \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +\item{chunkSize}{integer, The maximum number of records processed at a time. 
Note that this parameter is not available to all users.} \item{archived}{string, The archival status of the requested item(s).} } \description{ diff --git a/man/enhancements_put_cass_ncoa_archive.Rd b/man/enhancements_put_cass_ncoa_archive.Rd index 568a6b88..c1938820 100644 --- a/man/enhancements_put_cass_ncoa_archive.Rd +++ b/man/enhancements_put_cass_ncoa_archive.Rd @@ -30,10 +30,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -57,6 +58,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{source}{list, A list containing the following elements: \itemize{ \item databaseTable list . A list containing the following elements: @@ -91,6 +93,7 @@ A list containing the following elements: \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} \item{limitingSQL}{string, The limiting SQL for the source table. "WHERE" should be omitted (e.g. state='IL').} +\item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} \item{archived}{string, The archival status of the requested item(s).} } \description{ diff --git a/man/enhancements_put_cass_ncoa_transfer.Rd b/man/enhancements_put_cass_ncoa_transfer.Rd new file mode 100644 index 00000000..4b20ffbe --- /dev/null +++ b/man/enhancements_put_cass_ncoa_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_put_cass_ncoa_transfer} +\alias{enhancements_put_cass_ncoa_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +enhancements_put_cass_ncoa_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/enhancements_put_civis_data_match.Rd b/man/enhancements_put_civis_data_match.Rd index aad162a7..94d83fcf 100644 --- a/man/enhancements_put_civis_data_match.Rd +++ b/man/enhancements_put_civis_data_match.Rd @@ -24,7 +24,7 @@ enhancements_put_civis_data_match( \item{name}{string required. The name of the enhancement job.} -\item{input_field_mapping}{list required. The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{input_field_mapping}{list required. The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{input_table}{list required. A list containing the following elements: \itemize{ @@ -45,10 +45,11 @@ enhancements_put_civis_data_match( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -69,7 +70,7 @@ enhancements_put_civis_data_match( \item{max_matches}{integer optional. The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number optional. The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean optional. Whether the Civis Data Match Job has been archived.} } @@ -92,10 +93,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -119,7 +121,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -134,7 +137,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_put_civis_data_match_archive.Rd b/man/enhancements_put_civis_data_match_archive.Rd index 73423005..494ae1e6 100644 --- a/man/enhancements_put_civis_data_match_archive.Rd +++ b/man/enhancements_put_civis_data_match_archive.Rd @@ -30,10 +30,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -57,7 +58,8 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{inputFieldMapping}{list, The column mapping for the input table. See /enhancements/field_mapping for list of valid fields.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{inputFieldMapping}{list, The field (i.e., column) mapping for the input table. See https://api.civisanalytics.com/enhancements/field-mapping for a list of valid field types and descriptions. Each field type should be mapped to a string specifying a column name in the input table. For field types that support multiple values (e.g., the "phone" field), a list of column names can be provided (e.g., {"phone": ["home_phone", "mobile_phone"], ...}).} \item{inputTable}{list, A list containing the following elements: \itemize{ \item databaseName string, The Redshift database name for the table. @@ -72,7 +74,7 @@ A list containing the following elements: \item table string, The table name. }} \item{maxMatches}{integer, The maximum number of matches per record in the input table to return. Must be between 0 and 10. 0 returns all matches.} -\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned.} +\item{threshold}{number, The score threshold (between 0 and 1). Matches below this threshold will not be returned. The default value is 0.5.} \item{archived}{boolean, Whether the Civis Data Match Job has been archived.} \item{lastRun}{list, A list containing the following elements: \itemize{ diff --git a/man/enhancements_put_civis_data_match_transfer.Rd b/man/enhancements_put_civis_data_match_transfer.Rd new file mode 100644 index 00000000..6325447f --- /dev/null +++ b/man/enhancements_put_civis_data_match_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_put_civis_data_match_transfer} +\alias{enhancements_put_civis_data_match_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +enhancements_put_civis_data_match_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/enhancements_put_geocode.Rd b/man/enhancements_put_geocode.Rd index 6ca4e392..831b7b76 100644 --- a/man/enhancements_put_geocode.Rd +++ b/man/enhancements_put_geocode.Rd @@ -36,10 +36,11 @@ enhancements_put_geocode( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parent_id}{integer optional. Parent ID that triggers this enhancement.} @@ -91,10 +92,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -118,6 +120,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{remoteHostId}{integer, The ID of the remote host.} \item{credentialId}{integer, The ID of the remote host credential.} \item{sourceSchemaAndTable}{string, The source database schema and table.} diff --git a/man/enhancements_put_geocode_archive.Rd b/man/enhancements_put_geocode_archive.Rd index 55a7cf8b..b6885951 100644 --- a/man/enhancements_put_geocode_archive.Rd +++ b/man/enhancements_put_geocode_archive.Rd @@ -30,10 +30,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, Parent ID that triggers this enhancement.} \item{notifications}{list, A list containing the following elements: @@ -57,6 +58,7 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{remoteHostId}{integer, The ID of the remote host.} \item{credentialId}{integer, The ID of the remote host credential.} \item{sourceSchemaAndTable}{string, The source database schema and table.} diff --git a/man/enhancements_put_geocode_transfer.Rd b/man/enhancements_put_geocode_transfer.Rd new file mode 100644 index 00000000..1e78920f --- /dev/null +++ b/man/enhancements_put_geocode_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{enhancements_put_geocode_transfer} +\alias{enhancements_put_geocode_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +enhancements_put_geocode_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/exports_delete_files_csv_runs.Rd b/man/exports_delete_files_csv_runs.Rd new file mode 100644 index 00000000..47e3ee75 --- /dev/null +++ b/man/exports_delete_files_csv_runs.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_delete_files_csv_runs} +\alias{exports_delete_files_csv_runs} +\title{Cancel a run} +\usage{ +exports_delete_files_csv_runs(id, run_id) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} + +\item{run_id}{integer required. The ID of the run.} +} +\value{ +An empty HTTP response +} +\description{ +Cancel a run +} diff --git a/man/exports_get_files_csv.Rd b/man/exports_get_files_csv.Rd index ae97bc48..e4ea1f67 100644 --- a/man/exports_get_files_csv.Rd +++ b/man/exports_get_files_csv.Rd @@ -36,6 +36,7 @@ A list containing the following elements: \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get a CSV Export diff --git a/man/exports_get_files_csv_runs.Rd b/man/exports_get_files_csv_runs.Rd new file mode 100644 index 00000000..24d4cc93 --- /dev/null +++ b/man/exports_get_files_csv_runs.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_get_files_csv_runs} +\alias{exports_get_files_csv_runs} +\title{Check status of a run} +\usage{ +exports_get_files_csv_runs(id, run_id) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} + +\item{run_id}{integer required. The ID of the run.} +} +\value{ +A list containing the following elements: +\item{id}{integer, } +\item{state}{string, } +\item{createdAt}{string, The time that the run was queued.} +\item{startedAt}{string, The time that the run started.} +\item{finishedAt}{string, The time that the run completed.} +\item{error}{string, The error message for this run, if present.} +\item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} +} +\description{ +Check status of a run +} diff --git a/man/exports_list.Rd b/man/exports_list.Rd index 7917b51d..adc488e9 100644 --- a/man/exports_list.Rd +++ b/man/exports_list.Rd @@ -6,8 +6,8 @@ \usage{ exports_list( type = NULL, - author = NULL, status = NULL, + author = NULL, hidden = NULL, archived = NULL, limit = NULL, @@ -19,10 +19,10 @@ exports_list( \arguments{ \item{type}{string optional. 
If specified, return exports of these types. It accepts a comma-separated list, possible values are 'database' and 'gdoc'.} -\item{author}{string optional. If specified, return exports from this author. It accepts a comma-separated list of author ids.} - \item{status}{string optional. If specified, returns export with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} \item{archived}{string optional. The archival status of the requested item(s).} diff --git a/man/exports_list_files_csv_runs.Rd b/man/exports_list_files_csv_runs.Rd new file mode 100644 index 00000000..474ac951 --- /dev/null +++ b/man/exports_list_files_csv_runs.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_list_files_csv_runs} +\alias{exports_list_files_csv_runs} +\title{List runs for the given csv_export} +\usage{ +exports_list_files_csv_runs( + id, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} + +\item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to id. Must be one of: id.} + +\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} +} +\value{ +An array containing the following fields: +\item{id}{integer, } +\item{state}{string, } +\item{createdAt}{string, The time that the run was queued.} +\item{startedAt}{string, The time that the run started.} +\item{finishedAt}{string, The time that the run completed.} +\item{error}{string, The error message for this run, if present.} +} +\description{ +List runs for the given csv_export +} diff --git a/man/exports_list_files_csv_runs_logs.Rd b/man/exports_list_files_csv_runs_logs.Rd new file mode 100644 index 00000000..0028c3e7 --- /dev/null +++ b/man/exports_list_files_csv_runs_logs.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_list_files_csv_runs_logs} +\alias{exports_list_files_csv_runs_logs} +\title{Get the logs for a run} +\usage{ +exports_list_files_csv_runs_logs(id, run_id, last_id = NULL, limit = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} + +\item{run_id}{integer required. The ID of the run.} + +\item{last_id}{integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt.} + +\item{limit}{integer optional. The maximum number of log messages to return. Default of 10000.} +} +\value{ +An array containing the following fields: +\item{id}{integer, The ID of the log.} +\item{createdAt}{string, The time the log was created.} +\item{message}{string, The log message.} +\item{level}{string, The level of the log. 
One of unknown,fatal,error,warn,info,debug.} +} +\description{ +Get the logs for a run +} diff --git a/man/exports_list_files_csv_runs_outputs.Rd b/man/exports_list_files_csv_runs_outputs.Rd new file mode 100644 index 00000000..9e8d8016 --- /dev/null +++ b/man/exports_list_files_csv_runs_outputs.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_list_files_csv_runs_outputs} +\alias{exports_list_files_csv_runs_outputs} +\title{List the outputs for a run} +\usage{ +exports_list_files_csv_runs_outputs( + id, + run_id, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} + +\item{run_id}{integer required. The ID of the run.} + +\item{limit}{integer optional. Number of results to return. Defaults to its maximum of 50.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, id.} + +\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} +} +\value{ +An array containing the following fields: +\item{objectType}{string, The type of the output. Valid values are File, Table, Report, Project, Credential, or JSONValue} +\item{objectId}{integer, The ID of the output.} +\item{name}{string, The name of the output.} +\item{link}{string, The hypermedia link to the output.} +\item{value}{string, } +} +\description{ +List the outputs for a run +} diff --git a/man/exports_patch_files_csv.Rd b/man/exports_patch_files_csv.Rd index b5b0ab1b..2d597645 100644 --- a/man/exports_patch_files_csv.Rd +++ b/man/exports_patch_files_csv.Rd @@ -80,6 +80,7 @@ A list containing the following elements: \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update some attributes of this CSV Export diff --git a/man/exports_post_files_csv.Rd b/man/exports_post_files_csv.Rd index c5179aeb..caf059f9 100644 --- a/man/exports_post_files_csv.Rd +++ b/man/exports_post_files_csv.Rd @@ -77,6 +77,7 @@ A list containing the following elements: \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} } \description{ Create a CSV Export diff --git a/man/exports_post_files_csv_runs.Rd b/man/exports_post_files_csv_runs.Rd new file mode 100644 index 00000000..8fe61505 --- /dev/null +++ b/man/exports_post_files_csv_runs.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{exports_post_files_csv_runs} +\alias{exports_post_files_csv_runs} +\title{Start a run} +\usage{ +exports_post_files_csv_runs(id) +} +\arguments{ +\item{id}{integer required. The ID of the csv_export.} +} +\value{ +A list containing the following elements: +\item{id}{integer, } +\item{state}{string, } +\item{createdAt}{string, The time that the run was queued.} +\item{startedAt}{string, The time that the run started.} +\item{finishedAt}{string, The time that the run completed.} +\item{error}{string, The error message for this run, if present.} +\item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} +} +\description{ +Start a run +} diff --git a/man/exports_put_files_csv.Rd b/man/exports_put_files_csv.Rd index aff1f117..81e206b5 100644 --- a/man/exports_put_files_csv.Rd +++ b/man/exports_put_files_csv.Rd @@ -80,6 +80,7 @@ A list containing the following elements: \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Replace all attributes of this CSV Export diff --git a/man/exports_put_files_csv_archive.Rd b/man/exports_put_files_csv_archive.Rd index 30b6cbec..72d76d34 100644 --- a/man/exports_put_files_csv_archive.Rd +++ b/man/exports_put_files_csv_archive.Rd @@ -38,6 +38,7 @@ A list containing the following elements: \item{hidden}{boolean, A boolean value indicating whether or not this request should be hidden. Defaults to false.} \item{forceMultifile}{boolean, Whether or not the csv should be split into multiple files. Default: false} \item{maxFileSize}{integer, The max file size, in MB, created files will be. Only available when force_multifile is true. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update the archive status of this object diff --git a/man/feature_flags_delete_organizations.Rd b/man/feature_flags_delete_organizations.Rd new file mode 100644 index 00000000..80511105 --- /dev/null +++ b/man/feature_flags_delete_organizations.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{feature_flags_delete_organizations} +\alias{feature_flags_delete_organizations} +\title{Deactivate a feature for a organization} +\usage{ +feature_flags_delete_organizations(flag_name, organization_id) +} +\arguments{ +\item{flag_name}{string required. The feature flag name.} + +\item{organization_id}{integer required. 
Organization ID.} +} +\value{ +An empty HTTP response +} +\description{ +Deactivate a feature for a organization +} diff --git a/man/feature_flags_delete_users.Rd b/man/feature_flags_delete_users.Rd index 35e35cbf..520ca8c8 100644 --- a/man/feature_flags_delete_users.Rd +++ b/man/feature_flags_delete_users.Rd @@ -7,7 +7,7 @@ feature_flags_delete_users(flag_name, user_id) } \arguments{ -\item{flag_name}{string required. The feature flag name.} +\item{flag_name}{string required. The feature name.} \item{user_id}{integer required. The user ID.} } diff --git a/man/feature_flags_get.Rd b/man/feature_flags_get.Rd index acc5fed1..9b7cdd83 100644 --- a/man/feature_flags_get.Rd +++ b/man/feature_flags_get.Rd @@ -7,11 +7,12 @@ feature_flags_get(name) } \arguments{ -\item{name}{string required. The name of the feature flag.} +\item{name}{string required. The name of the feature.} } \value{ A list containing the following elements: -\item{name}{string, The name of the feature flag} +\item{name}{string, The name of the feature.} +\item{description}{string, } \item{organizations}{array, An array containing the following fields: \itemize{ \item id integer, Organization ID diff --git a/man/feature_flags_list.Rd b/man/feature_flags_list.Rd index 3adb7a8e..d3031e25 100644 --- a/man/feature_flags_list.Rd +++ b/man/feature_flags_list.Rd @@ -8,16 +8,16 @@ feature_flags_list() } \value{ An array containing the following fields: -\item{name}{string, The name of the feature flag.} -\item{userCount}{integer, The number of users with this feature flag enabled.} +\item{name}{string, The name of the feature.} \item{description}{string, } +\item{activeForMe}{boolean, Whether the feature is active for the current user.} +\item{userCount}{integer, The number of users with this feature flag enabled.} \item{team}{string, } \item{jira}{string, } \item{added}{string, } \item{groupCount}{integer, } \item{organizationCount}{integer, } \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -\item{activeForMe}{boolean, Whether the feature flag is active for the current user.} } \description{ List feature flags diff --git a/man/feature_flags_put_groups.Rd b/man/feature_flags_put_groups.Rd index 9759b99c..52d7b525 100644 --- a/man/feature_flags_put_groups.Rd +++ b/man/feature_flags_put_groups.Rd @@ -13,16 +13,16 @@ feature_flags_put_groups(flag_name, group_id) } \value{ A list containing the following elements: -\item{name}{string, The name of the feature flag.} -\item{userCount}{integer, The number of users with this feature flag enabled.} +\item{name}{string, The name of the feature.} \item{description}{string, } +\item{activeForMe}{boolean, Whether the feature is active for the current user.} +\item{userCount}{integer, The number of users with this feature flag enabled.} \item{team}{string, } \item{jira}{string, } \item{added}{string, } \item{groupCount}{integer, } \item{organizationCount}{integer, } \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -\item{activeForMe}{boolean, Whether the feature flag is active for the current user.} } \description{ Activate a feature for a group diff --git a/man/feature_flags_put_organizations.Rd b/man/feature_flags_put_organizations.Rd new file mode 100644 index 00000000..8059b4c8 --- /dev/null +++ b/man/feature_flags_put_organizations.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{feature_flags_put_organizations} 
+\alias{feature_flags_put_organizations} +\title{Activate a feature for a organization} +\usage{ +feature_flags_put_organizations(flag_name, organization_id) +} +\arguments{ +\item{flag_name}{string required. The feature flag name.} + +\item{organization_id}{integer required. Organization ID.} +} +\value{ +A list containing the following elements: +\item{name}{string, The name of the feature.} +\item{description}{string, } +\item{activeForMe}{boolean, Whether the feature is active for the current user.} +\item{userCount}{integer, The number of users with this feature flag enabled.} +\item{team}{string, } +\item{jira}{string, } +\item{added}{string, } +\item{groupCount}{integer, } +\item{organizationCount}{integer, } +\item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} +} +\description{ +Activate a feature for a organization +} diff --git a/man/feature_flags_put_users.Rd b/man/feature_flags_put_users.Rd index ca7709b5..881a4d3d 100644 --- a/man/feature_flags_put_users.Rd +++ b/man/feature_flags_put_users.Rd @@ -7,22 +7,22 @@ feature_flags_put_users(flag_name, user_id) } \arguments{ -\item{flag_name}{string required. The feature flag name.} +\item{flag_name}{string required. The feature name.} \item{user_id}{integer required. The user ID.} } \value{ A list containing the following elements: -\item{name}{string, The name of the feature flag.} -\item{userCount}{integer, The number of users with this feature flag enabled.} +\item{name}{string, The name of the feature.} \item{description}{string, } +\item{activeForMe}{boolean, Whether the feature is active for the current user.} +\item{userCount}{integer, The number of users with this feature flag enabled.} \item{team}{string, } \item{jira}{string, } \item{added}{string, } \item{groupCount}{integer, } \item{organizationCount}{integer, } \item{percentage}{integer, The target percentage of users who should have this feature flag enabled.} -\item{activeForMe}{boolean, Whether the feature flag is active for the current user.} } \description{ Activate a feature for a user diff --git a/man/files_get.Rd b/man/files_get.Rd index f678b9e1..812ab539 100644 --- a/man/files_get.Rd +++ b/man/files_get.Rd @@ -37,6 +37,7 @@ A list containing the following elements: \item compression string, The type of compression of the file. One of "gzip", or "none". \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get details about a file diff --git a/man/files_list_dependencies.Rd b/man/files_list_dependencies.Rd new file mode 100644 index 00000000..6a23d6a4 --- /dev/null +++ b/man/files_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{files_list_dependencies} +\alias{files_list_dependencies} +\title{List dependent objects for this object} +\usage{ +files_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/files_patch.Rd b/man/files_patch.Rd index c36802af..d235a764 100644 --- a/man/files_patch.Rd +++ b/man/files_patch.Rd @@ -37,6 +37,7 @@ A list containing the following elements: \item compression string, The type of compression of the file. One of "gzip", or "none". \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update details about a file diff --git a/man/files_post.Rd b/man/files_post.Rd index ccb0a932..c363ae15 100644 --- a/man/files_post.Rd +++ b/man/files_post.Rd @@ -20,6 +20,7 @@ A list containing the following elements: \item{expiresAt}{string, The date and time the file will expire. If not specified, the file will expire in 30 days. To keep a file indefinitely, specify null.} \item{uploadUrl}{string, The URL that may be used to upload a file. To use the upload URL, initiate a POST request to the given URL with the file you wish to import as the "file" form field.} \item{uploadFields}{list, A hash containing the form fields to be included with the POST request.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Initiate an upload of a file into the platform diff --git a/man/files_put.Rd b/man/files_put.Rd index 25fdc94b..950c989e 100644 --- a/man/files_put.Rd +++ b/man/files_put.Rd @@ -37,6 +37,7 @@ A list containing the following elements: \item compression string, The type of compression of the file. One of "gzip", or "none". \item tableColumns array, An array of hashes corresponding to the columns in the file. Each hash should have keys for column "name" and "sql_type" }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update details about a file diff --git a/man/files_put_transfer.Rd b/man/files_put_transfer.Rd new file mode 100644 index 00000000..22f21122 --- /dev/null +++ b/man/files_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{files_put_transfer} +\alias{files_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +files_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/git_repos_list_refs.Rd b/man/git_repos_list_refs.Rd new file mode 100644 index 00000000..a4f358b4 --- /dev/null +++ b/man/git_repos_list_refs.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{git_repos_list_refs} +\alias{git_repos_list_refs} +\title{Get all branches and tags of a bookmarked git repository} +\usage{ +git_repos_list_refs(id) +} +\arguments{ +\item{id}{integer required. The ID for this git repository.} +} +\value{ +A list containing the following elements: +\item{branches}{array, List of branch names of this git repository.} +\item{tags}{array, List of tag names of this git repository.} +} +\description{ +Get all branches and tags of a bookmarked git repository +} diff --git a/man/groups_get.Rd b/man/groups_get.Rd index c3cd0685..424c1ae3 100644 --- a/man/groups_get.Rd +++ b/man/groups_get.Rd @@ -14,18 +14,21 @@ A list containing the following elements: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} \item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} -\item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} \item{roleIds}{array, An array of ids of all the roles this group has.} \item{defaultTimeZone}{string, The default time zone of this group.} -\item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. 
Do not use this attribute as it may break in the future.} +\item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +\item{defaultServicesLabel}{string, The default partition label for services of this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. @@ -33,6 +36,9 @@ A list containing the following elements: \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. +\item email string, This user's email address. +\item primaryGroupId integer, The ID of the primary group of this user. +\item active boolean, Whether this user account is active or deactivated. }} } \description{ diff --git a/man/groups_list.Rd b/man/groups_list.Rd index 9cb05f42..a5453200 100644 --- a/man/groups_list.Rd +++ b/man/groups_list.Rd @@ -8,6 +8,8 @@ groups_list( query = NULL, permission = NULL, include_members = NULL, + organization_id = NULL, + user_ids = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -15,12 +17,16 @@ groups_list( ) } \arguments{ -\item{query}{string optional. If specified, it will filter the groups returned. Infix matching is supported (e.g., "query=group" will return "group" and "group of people" and "my group" and "my group of people").} +\item{query}{string optional. If specified, it will filter the groups returned.} \item{permission}{string optional. A permissions string, one of "read", "write", or "manage". Lists only groups for which the current user has that permission.} \item{include_members}{boolean optional. Show members of the group.} +\item{organization_id}{integer optional. The organization by which to filter groups.} + +\item{user_ids}{array optional. A list of user IDs to filter groups by.Groups will be returned if any of the users is a member} + \item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} \item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} @@ -34,10 +40,15 @@ An array containing the following fields: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} +\item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. 
diff --git a/man/groups_list_child_groups.Rd b/man/groups_list_child_groups.Rd new file mode 100644 index 00000000..d40486a5 --- /dev/null +++ b/man/groups_list_child_groups.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{groups_list_child_groups} +\alias{groups_list_child_groups} +\title{Get child groups of this group} +\usage{ +groups_list_child_groups(id) +} +\arguments{ +\item{id}{integer required. The ID of this group.} +} +\value{ +A list containing the following elements: +\item{manageable}{array, An array containing the following fields: +\itemize{ +\item id integer, +\item name string, +}} +\item{writeable}{array, An array containing the following fields: +\itemize{ +\item id integer, +\item name string, +}} +\item{readable}{array, An array containing the following fields: +\itemize{ +\item id integer, +\item name string, +}} +} +\description{ +Get child groups of this group +} diff --git a/man/groups_patch.Rd b/man/groups_patch.Rd index 01840993..77e97719 100644 --- a/man/groups_patch.Rd +++ b/man/groups_patch.Rd @@ -10,7 +10,6 @@ groups_patch( description = NULL, slug = NULL, organization_id = NULL, - must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, @@ -30,37 +29,38 @@ groups_patch( \item{organization_id}{integer optional. The ID of the organization this group belongs to.} -\item{must_agree_to_eula}{boolean optional. Whether or not members of this group must sign the EULA.} - \item{default_otp_required_for_login}{boolean optional. The two factor authentication requirement for this group.} \item{role_ids}{array optional. An array of ids of all the roles this group has.} \item{default_time_zone}{string optional. The default time zone of this group.} -\item{default_jobs_label}{string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_jobs_label}{string optional. The default partition label for jobs of this group.} -\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group.} -\item{default_services_label}{string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_services_label}{string optional. 
The default partition label for services of this group.} } \value{ A list containing the following elements: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} \item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} -\item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} \item{roleIds}{array, An array of ids of all the roles this group has.} \item{defaultTimeZone}{string, The default time zone of this group.} -\item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +\item{defaultServicesLabel}{string, The default partition label for services of this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. @@ -68,6 +68,9 @@ A list containing the following elements: \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. +\item email string, This user's email address. +\item primaryGroupId integer, The ID of the primary group of this user. +\item active boolean, Whether this user account is active or deactivated. }} } \description{ diff --git a/man/groups_post.Rd b/man/groups_post.Rd index 418c9c35..6a07002a 100644 --- a/man/groups_post.Rd +++ b/man/groups_post.Rd @@ -9,7 +9,6 @@ groups_post( description = NULL, slug = NULL, organization_id = NULL, - must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, @@ -27,37 +26,38 @@ groups_post( \item{organization_id}{integer optional. The ID of the organization this group belongs to.} -\item{must_agree_to_eula}{boolean optional. Whether or not members of this group must sign the EULA.} - \item{default_otp_required_for_login}{boolean optional. The two factor authentication requirement for this group.} \item{role_ids}{array optional. An array of ids of all the roles this group has.} \item{default_time_zone}{string optional. 
The default time zone of this group.} -\item{default_jobs_label}{string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_jobs_label}{string optional. The default partition label for jobs of this group.} -\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group.} -\item{default_services_label}{string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_services_label}{string optional. The default partition label for services of this group.} } \value{ A list containing the following elements: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} \item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} -\item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} \item{roleIds}{array, An array of ids of all the roles this group has.} \item{defaultTimeZone}{string, The default time zone of this group.} -\item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +\item{defaultServicesLabel}{string, The default partition label for services of this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. @@ -65,6 +65,9 @@ A list containing the following elements: \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. +\item email string, This user's email address. 
+\item primaryGroupId integer, The ID of the primary group of this user. +\item active boolean, Whether this user account is active or deactivated. }} } \description{ diff --git a/man/groups_put.Rd b/man/groups_put.Rd index 01253f43..d82ea293 100644 --- a/man/groups_put.Rd +++ b/man/groups_put.Rd @@ -10,7 +10,6 @@ groups_put( description = NULL, slug = NULL, organization_id = NULL, - must_agree_to_eula = NULL, default_otp_required_for_login = NULL, role_ids = NULL, default_time_zone = NULL, @@ -30,37 +29,38 @@ groups_put( \item{organization_id}{integer optional. The ID of the organization this group belongs to.} -\item{must_agree_to_eula}{boolean optional. Whether or not members of this group must sign the EULA.} - \item{default_otp_required_for_login}{boolean optional. The two factor authentication requirement for this group.} \item{role_ids}{array optional. An array of ids of all the roles this group has.} \item{default_time_zone}{string optional. The default time zone of this group.} -\item{default_jobs_label}{string optional. The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_jobs_label}{string optional. The default partition label for jobs of this group.} -\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_notebooks_label}{string optional. The default partition label for notebooks of this group.} -\item{default_services_label}{string optional. The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{default_services_label}{string optional. The default partition label for services of this group.} } \value{ A list containing the following elements: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} \item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} -\item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} \item{roleIds}{array, An array of ids of all the roles this group has.} \item{defaultTimeZone}{string, The default time zone of this group.} -\item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. 
Do not use this attribute as it may break in the future.} -\item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} +\item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +\item{defaultServicesLabel}{string, The default partition label for services of this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. @@ -68,6 +68,9 @@ A list containing the following elements: \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. +\item email string, This user's email address. +\item primaryGroupId integer, The ID of the primary group of this user. +\item active boolean, Whether this user account is active or deactivated. }} } \description{ diff --git a/man/groups_put_members.Rd b/man/groups_put_members.Rd index ba3152ed..e27b3c07 100644 --- a/man/groups_put_members.Rd +++ b/man/groups_put_members.Rd @@ -16,18 +16,21 @@ A list containing the following elements: \item{id}{integer, The ID of this group.} \item{name}{string, This group's name.} \item{createdAt}{string, The date and time when this group was created.} +\item{updatedAt}{string, The date and time when this group was last updated.} \item{description}{string, The description of the group.} \item{slug}{string, The slug for this group.} \item{organizationId}{integer, The ID of the organization this group belongs to.} \item{organizationName}{string, The name of the organization this group belongs to.} -\item{memberCount}{integer, The total number of members in this group.} -\item{mustAgreeToEula}{boolean, Whether or not members of this group must sign the EULA.} +\item{memberCount}{integer, The number of active members in this group.} +\item{totalMemberCount}{integer, The total number of members in this group.} \item{defaultOtpRequiredForLogin}{boolean, The two factor authentication requirement for this group.} \item{roleIds}{array, An array of ids of all the roles this group has.} \item{defaultTimeZone}{string, The default time zone of this group.} -\item{defaultJobsLabel}{string, The default partition label for jobs of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group. Only available if custom_partitions feature flag is set. Do not use this attribute as it may break in the future.} -\item{defaultServicesLabel}{string, The default partition label for services of this group. Only available if custom_partitions feature flag is set. 
Do not use this attribute as it may break in the future.} +\item{defaultJobsLabel}{string, The default partition label for jobs of this group.} +\item{defaultNotebooksLabel}{string, The default partition label for notebooks of this group.} +\item{defaultServicesLabel}{string, The default partition label for services of this group.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this group.} +\item{createdById}{integer, The ID of the user who created this group.} \item{members}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this user. @@ -35,6 +38,9 @@ A list containing the following elements: \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. +\item email string, This user's email address. +\item primaryGroupId integer, The ID of the primary group of this user. +\item active boolean, Whether this user account is active or deactivated. }} } \description{ diff --git a/man/imports_delete_files_csv_runs.Rd b/man/imports_delete_files_csv_runs.Rd new file mode 100644 index 00000000..129d193d --- /dev/null +++ b/man/imports_delete_files_csv_runs.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_delete_files_csv_runs} +\alias{imports_delete_files_csv_runs} +\title{Cancel a run} +\usage{ +imports_delete_files_csv_runs(id, run_id) +} +\arguments{ +\item{id}{integer required. The ID of the csv_import.} + +\item{run_id}{integer required. The ID of the run.} +} +\value{ +An empty HTTP response +} +\description{ +Cancel a run +} diff --git a/man/imports_get.Rd b/man/imports_get.Rd index cbfc01cf..cb4b7f7e 100644 --- a/man/imports_get.Rd +++ b/man/imports_get.Rd @@ -30,10 +30,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -91,6 +92,7 @@ A list containing the following elements: \item{timeZone}{string, The time zone of this import.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get details about an import diff --git a/man/imports_get_files_csv.Rd b/man/imports_get_files_csv.Rd index ae4d7f15..7a5fb947 100644 --- a/man/imports_get_files_csv.Rd +++ b/man/imports_get_files_csv.Rd @@ -52,6 +52,7 @@ A list containing the following elements: \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. 
}} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get a CSV Import diff --git a/man/imports_get_files_csv_runs.Rd b/man/imports_get_files_csv_runs.Rd new file mode 100644 index 00000000..0e473f3a --- /dev/null +++ b/man/imports_get_files_csv_runs.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_get_files_csv_runs} +\alias{imports_get_files_csv_runs} +\title{Check status of a run} +\usage{ +imports_get_files_csv_runs(id, run_id) +} +\arguments{ +\item{id}{integer required. The ID of the csv_import.} + +\item{run_id}{integer required. The ID of the run.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the run.} +\item{csvImportId}{integer, The ID of the csv_import.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +\item{isCancelRequested}{boolean, True if run cancel requested, else false.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} +} +\description{ +Check status of a run +} diff --git a/man/imports_get_files_runs.Rd b/man/imports_get_files_runs.Rd index a95edb8e..28d6eeda 100644 --- a/man/imports_get_files_runs.Rd +++ b/man/imports_get_files_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{importId}{integer, The ID of the import.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/imports_list.Rd b/man/imports_list.Rd index 52c06f37..4a02e088 100644 --- a/man/imports_list.Rd +++ b/man/imports_list.Rd @@ -6,10 +6,10 @@ \usage{ imports_list( type = NULL, - author = NULL, destination = NULL, source = NULL, status = NULL, + author = NULL, hidden = NULL, archived = NULL, limit = NULL, @@ -21,14 +21,14 @@ imports_list( \arguments{ \item{type}{string optional. If specified, return imports of these types. It accepts a comma-separated list, possible values are 'AutoImport', 'DbSync', 'Salesforce', 'GdocImport'.} -\item{author}{string optional. If specified, return imports from this author. It accepts a comma-separated list of author ids.} - \item{destination}{string optional. If specified, returns imports with one of these destinations. It accepts a comma-separated list of remote host ids.} \item{source}{string optional. If specified, returns imports with one of these sources. It accepts a comma-separated list of remote host ids. 'DbSync' must be specified for 'type'.} \item{status}{string optional. If specified, returns imports with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} +\item{author}{string optional. If specified, return items from any of these authors. 
It accepts a comma-separated list of user IDs.} + \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} \item{archived}{string optional. The archival status of the requested item(s).} @@ -62,10 +62,11 @@ An array containing the following fields: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{id}{integer, The ID for the import.} \item{isOutbound}{boolean, } diff --git a/man/imports_list_dependencies.Rd b/man/imports_list_dependencies.Rd new file mode 100644 index 00000000..9456ce51 --- /dev/null +++ b/man/imports_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_list_dependencies} +\alias{imports_list_dependencies} +\title{List dependent objects for this object} +\usage{ +imports_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/imports_list_files_csv_runs.Rd b/man/imports_list_files_csv_runs.Rd new file mode 100644 index 00000000..26267e1c --- /dev/null +++ b/man/imports_list_files_csv_runs.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_list_files_csv_runs} +\alias{imports_list_files_csv_runs} +\title{List runs for the given csv_import} +\usage{ +imports_list_files_csv_runs( + id, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the csv_import.} + +\item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 100.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to id. Must be one of: id.} + +\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} +} +\value{ +An array containing the following fields: +\item{id}{integer, The ID of the run.} +\item{csvImportId}{integer, The ID of the csv_import.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +\item{isCancelRequested}{boolean, True if run cancel requested, else false.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} +} +\description{ +List runs for the given csv_import +} diff --git a/man/imports_list_files_csv_runs_logs.Rd b/man/imports_list_files_csv_runs_logs.Rd new file mode 100644 index 00000000..7affc368 --- /dev/null +++ b/man/imports_list_files_csv_runs_logs.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_list_files_csv_runs_logs} +\alias{imports_list_files_csv_runs_logs} +\title{Get the logs for a run} +\usage{ +imports_list_files_csv_runs_logs(id, run_id, last_id = NULL, limit = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the csv_import.} + +\item{run_id}{integer required. The ID of the run.} + +\item{last_id}{integer optional. The ID of the last log message received. Log entries with this ID value or lower will be omitted.Logs are sorted by ID if this value is provided, and are otherwise sorted by createdAt.} + +\item{limit}{integer optional. The maximum number of log messages to return. Default of 10000.} +} +\value{ +An array containing the following fields: +\item{id}{integer, The ID of the log.} +\item{createdAt}{string, The time the log was created.} +\item{message}{string, The log message.} +\item{level}{string, The level of the log. One of unknown,fatal,error,warn,info,debug.} +} +\description{ +Get the logs for a run +} diff --git a/man/imports_list_files_runs.Rd b/man/imports_list_files_runs.Rd index f0018c52..89cdd19b 100644 --- a/man/imports_list_files_runs.Rd +++ b/man/imports_list_files_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{importId}{integer, The ID of the import.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/imports_patch_files_csv.Rd b/man/imports_patch_files_csv.Rd index ba7f4f2b..68539938 100644 --- a/man/imports_patch_files_csv.Rd +++ b/man/imports_patch_files_csv.Rd @@ -119,6 +119,7 @@ A list containing the following elements: \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. }} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} } \description{ Update some attributes of this CSV Import diff --git a/man/imports_post.Rd b/man/imports_post.Rd index f47b4041..739a7a5e 100644 --- a/man/imports_post.Rd +++ b/man/imports_post.Rd @@ -42,10 +42,11 @@ imports_post( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -91,10 +92,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -152,6 +154,7 @@ A list containing the following elements: \item{timeZone}{string, The time zone of this import.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Create a new import configuration diff --git a/man/imports_post_files_csv.Rd b/man/imports_post_files_csv.Rd index 8cc816eb..9f7697da 100644 --- a/man/imports_post_files_csv.Rd +++ b/man/imports_post_files_csv.Rd @@ -119,6 +119,7 @@ A list containing the following elements: \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. }} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Create a CSV Import diff --git a/man/imports_post_files_csv_runs.Rd b/man/imports_post_files_csv_runs.Rd new file mode 100644 index 00000000..97cb2dbb --- /dev/null +++ b/man/imports_post_files_csv_runs.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_post_files_csv_runs} +\alias{imports_post_files_csv_runs} +\title{Start a run} +\usage{ +imports_post_files_csv_runs(id) +} +\arguments{ +\item{id}{integer required. 
The ID of the csv_import.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the run.} +\item{csvImportId}{integer, The ID of the csv_import.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} +\item{isCancelRequested}{boolean, True if run cancel requested, else false.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} +} +\description{ +Start a run +} diff --git a/man/imports_post_files_runs.Rd b/man/imports_post_files_runs.Rd index 0318975c..6779f0e4 100644 --- a/man/imports_post_files_runs.Rd +++ b/man/imports_post_files_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{importId}{integer, The ID of the import.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/imports_post_syncs.Rd b/man/imports_post_syncs.Rd index 27c0e93d..4bff1ce5 100644 --- a/man/imports_post_syncs.Rd +++ b/man/imports_post_syncs.Rd @@ -74,7 +74,7 @@ imports_post_syncs(id, source, destination, advanced_options = NULL) \item partitionTablePartitionColumnMaxName string, This parameter is deprecated \item lastModifiedColumn string, \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -\item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. +\item chunkingMethod string, This parameter is deprecated \item firstRowIsHeader boolean, \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. @@ -153,7 +153,7 @@ A list containing the following elements: \item partitionTablePartitionColumnMaxName string, This parameter is deprecated \item lastModifiedColumn string, \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -\item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. +\item chunkingMethod string, This parameter is deprecated \item firstRowIsHeader boolean, \item exportAction string, The kind of export action you want to have the export execute. 
Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. diff --git a/man/imports_put.Rd b/man/imports_put.Rd index 5317abfd..4a125cab 100644 --- a/man/imports_put.Rd +++ b/man/imports_put.Rd @@ -44,10 +44,11 @@ imports_put( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -91,10 +92,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -152,6 +154,7 @@ A list containing the following elements: \item{timeZone}{string, The time zone of this import.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update an import diff --git a/man/imports_put_archive.Rd b/man/imports_put_archive.Rd index 89e346a6..e26b90d2 100644 --- a/man/imports_put_archive.Rd +++ b/man/imports_put_archive.Rd @@ -32,10 +32,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -93,6 +94,7 @@ A list containing the following elements: \item{timeZone}{string, The time zone of this import.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update the archive status of this object diff --git a/man/imports_put_files_csv.Rd b/man/imports_put_files_csv.Rd index bd4d1f4a..05a4dc10 100644 --- a/man/imports_put_files_csv.Rd +++ b/man/imports_put_files_csv.Rd @@ -119,6 +119,7 @@ A list containing the following elements: \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. }} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Replace all attributes of this CSV Import diff --git a/man/imports_put_files_csv_archive.Rd b/man/imports_put_files_csv_archive.Rd index 75ce7c3e..d778dc14 100644 --- a/man/imports_put_files_csv_archive.Rd +++ b/man/imports_put_files_csv_archive.Rd @@ -54,6 +54,7 @@ A list containing the following elements: \item sortkeys array, Sortkeys for this table in Redshift. Please provide a maximum of two. }} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update the archive status of this object diff --git a/man/imports_put_syncs.Rd b/man/imports_put_syncs.Rd index 33845ded..bbd8f769 100644 --- a/man/imports_put_syncs.Rd +++ b/man/imports_put_syncs.Rd @@ -76,7 +76,7 @@ imports_put_syncs(id, sync_id, source, destination, advanced_options = NULL) \item partitionTablePartitionColumnMaxName string, This parameter is deprecated \item lastModifiedColumn string, \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -\item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. +\item chunkingMethod string, This parameter is deprecated \item firstRowIsHeader boolean, \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. 
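The csv_import run endpoints added in this patch (imports_post_files_csv_runs, imports_get_files_csv_runs, imports_list_files_csv_runs_logs, imports_delete_files_csv_runs) follow the platform's usual start / poll / inspect pattern. The sketch below shows one way they might be combined from R; it is illustrative only, the import ID is hypothetical, and it assumes a valid CIVIS_API_KEY is configured in the environment.

library(civis)

csv_import_id <- 1234  # hypothetical csv_import ID

# Start a run, then poll its status until it leaves the queued/running states.
run <- imports_post_files_csv_runs(csv_import_id)
status <- imports_get_files_csv_runs(csv_import_id, run$id)
while (status$state %in% c("queued", "running")) {
  Sys.sleep(10)
  status <- imports_get_files_csv_runs(csv_import_id, run$id)
}

# Fetch the run's logs; a run still in progress could instead be cancelled
# with imports_delete_files_csv_runs(csv_import_id, run$id).
logs <- imports_list_files_csv_runs_logs(csv_import_id, run$id)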
@@ -155,7 +155,7 @@ A list containing the following elements: \item partitionTablePartitionColumnMaxName string, This parameter is deprecated \item lastModifiedColumn string, \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -\item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. +\item chunkingMethod string, This parameter is deprecated \item firstRowIsHeader boolean, \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. diff --git a/man/imports_put_syncs_archive.Rd b/man/imports_put_syncs_archive.Rd index 36088650..2abb72c0 100644 --- a/man/imports_put_syncs_archive.Rd +++ b/man/imports_put_syncs_archive.Rd @@ -83,7 +83,7 @@ A list containing the following elements: \item partitionTablePartitionColumnMaxName string, This parameter is deprecated \item lastModifiedColumn string, \item mysqlCatalogMatchesSchema boolean, This attribute is no longer available; defaults to true but cannot be used. -\item chunkingMethod string, The method used to break the data into smaller chunks for transfer. The value can be set to sorted_by_identity_columns or if not set the chunking method will be chosen automatically. +\item chunkingMethod string, This parameter is deprecated \item firstRowIsHeader boolean, \item exportAction string, The kind of export action you want to have the export execute. Set to "newsprsht" if you want a new worksheet inside a new spreadsheet. Set to "newwksht" if you want a new worksheet inside an existing spreadsheet. Set to "updatewksht" if you want to overwrite an existing worksheet inside an existing spreadsheet. Set to "appendwksht" if you want to append to the end of an existing worksheet inside an existing spreadsheet. Default is set to "newsprsht" \item sqlQuery string, If you are doing a Google Sheet export, this is your SQL query. diff --git a/man/imports_put_transfer.Rd b/man/imports_put_transfer.Rd new file mode 100644 index 00000000..6bc9d7eb --- /dev/null +++ b/man/imports_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{imports_put_transfer} +\alias{imports_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +imports_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/jobs_get.Rd b/man/jobs_get.Rd index db6346e3..6b355483 100644 --- a/man/jobs_get.Rd +++ b/man/jobs_get.Rd @@ -38,6 +38,15 @@ A list containing the following elements: }} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{successEmailSubject}{string, } \item{successEmailBody}{string, } \item{runningAsUser}{string, } @@ -45,10 +54,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} } \description{ diff --git a/man/jobs_list.Rd b/man/jobs_list.Rd index b61e1823..46ae662c 100644 --- a/man/jobs_list.Rd +++ b/man/jobs_list.Rd @@ -12,6 +12,7 @@ jobs_list( scheduled = NULL, hidden = NULL, archived = NULL, + author = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -33,6 +34,8 @@ jobs_list( \item{archived}{string optional. The archival status of the requested item(s).} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{limit}{integer optional. Number of results to return. Defaults to its maximum of 50.} \item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} @@ -60,13 +63,22 @@ An array containing the following fields: \item error string, The error message for this run, if present. }} \item{archived}{string, The archival status of the requested item(s).} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. 
+\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} } \description{ diff --git a/man/jobs_list_dependencies.Rd b/man/jobs_list_dependencies.Rd new file mode 100644 index 00000000..fc0e1f8d --- /dev/null +++ b/man/jobs_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{jobs_list_dependencies} +\alias{jobs_list_dependencies} +\title{List dependent objects for this object} +\usage{ +jobs_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/jobs_list_parents.Rd b/man/jobs_list_parents.Rd index fbe40d78..667f8c49 100644 --- a/man/jobs_list_parents.Rd +++ b/man/jobs_list_parents.Rd @@ -38,6 +38,15 @@ A list containing the following elements: }} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{successEmailSubject}{string, } \item{successEmailBody}{string, } \item{runningAsUser}{string, } @@ -45,10 +54,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} } \description{ diff --git a/man/jobs_list_workflows.Rd b/man/jobs_list_workflows.Rd index feb70edb..6f95bb54 100644 --- a/man/jobs_list_workflows.Rd +++ b/man/jobs_list_workflows.Rd @@ -30,10 +30,11 @@ An array containing the following fields: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} diff --git a/man/jobs_put_archive.Rd b/man/jobs_put_archive.Rd index 8ff47d5e..43a13675 100644 --- a/man/jobs_put_archive.Rd +++ b/man/jobs_put_archive.Rd @@ -40,6 +40,15 @@ A list containing the following elements: }} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{successEmailSubject}{string, } \item{successEmailBody}{string, } \item{runningAsUser}{string, } @@ -47,10 +56,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} } \description{ diff --git a/man/jobs_put_transfer.Rd b/man/jobs_put_transfer.Rd new file mode 100644 index 00000000..61808a6e --- /dev/null +++ b/man/jobs_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{jobs_put_transfer} +\alias{jobs_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +jobs_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/json_values_list_dependencies.Rd b/man/json_values_list_dependencies.Rd new file mode 100644 index 00000000..987e2dc3 --- /dev/null +++ b/man/json_values_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{json_values_list_dependencies} +\alias{json_values_list_dependencies} +\title{List dependent objects for this object} +\usage{ +json_values_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/json_values_put_transfer.Rd b/man/json_values_put_transfer.Rd new file mode 100644 index 00000000..bea0651d --- /dev/null +++ b/man/json_values_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{json_values_put_transfer} +\alias{json_values_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +json_values_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. 
ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/media_get_optimizations_runs.Rd b/man/media_get_optimizations_runs.Rd index 8a433284..1dd58286 100644 --- a/man/media_get_optimizations_runs.Rd +++ b/man/media_get_optimizations_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{optimizationId}{integer, The ID of the optimization.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/media_list_optimizations_runs.Rd b/man/media_list_optimizations_runs.Rd index 102a3e19..6bd6fd65 100644 --- a/man/media_list_optimizations_runs.Rd +++ b/man/media_list_optimizations_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{optimizationId}{integer, The ID of the optimization.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/media_post_optimizations_runs.Rd b/man/media_post_optimizations_runs.Rd index 2e2ba87f..2bb64eb1 100644 --- a/man/media_post_optimizations_runs.Rd +++ b/man/media_post_optimizations_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{optimizationId}{integer, The ID of the optimization.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} 
\item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/models_get.Rd b/man/models_get.Rd index 9410ffe3..5ad395ca 100644 --- a/man/models_get.Rd +++ b/man/models_get.Rd @@ -44,10 +44,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{parentId}{integer, The ID of the parent job that will trigger this model.} \item{runningAs}{list, A list containing the following elements: diff --git a/man/models_list.Rd b/man/models_list.Rd index bb6d7b9e..434cbe83 100644 --- a/man/models_list.Rd +++ b/man/models_list.Rd @@ -8,8 +8,8 @@ models_list( model_name = NULL, training_table_name = NULL, dependent_variable = NULL, - author = NULL, status = NULL, + author = NULL, hidden = NULL, archived = NULL, limit = NULL, @@ -25,10 +25,10 @@ models_list( \item{dependent_variable}{string optional. If specified, will be used to filter the models returned by the dependent variable column name. Substring matching is supported. (e.g., "dependentVariable=predictor" will return both "predictor" and "my predictor").} -\item{author}{string optional. If specified, return models from this author. It accepts a comma-separated list of author ids.} - \item{status}{string optional. If specified, returns models with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} \item{archived}{string optional. The archival status of the requested item(s).} @@ -62,10 +62,11 @@ An array containing the following fields: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} \item{parentId}{integer, The ID of the parent job that will trigger this model.} \item{timeZone}{string, The time zone of this model.} diff --git a/man/models_list_dependencies.Rd b/man/models_list_dependencies.Rd new file mode 100644 index 00000000..d77c50a0 --- /dev/null +++ b/man/models_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{models_list_dependencies} +\alias{models_list_dependencies} +\title{List dependent objects for this object} +\usage{ +models_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/models_list_schedules.Rd b/man/models_list_schedules.Rd index 3ddbfc7c..1434a521 100644 --- a/man/models_list_schedules.Rd +++ b/man/models_list_schedules.Rd @@ -15,10 +15,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} } \description{ diff --git a/man/models_put_archive.Rd b/man/models_put_archive.Rd index 89a384bb..b5f72feb 100644 --- a/man/models_put_archive.Rd +++ b/man/models_put_archive.Rd @@ -46,10 +46,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} \item{parentId}{integer, The ID of the parent job that will trigger this model.} \item{runningAs}{list, A list containing the following elements: diff --git a/man/models_put_transfer.Rd b/man/models_put_transfer.Rd new file mode 100644 index 00000000..539213d3 --- /dev/null +++ b/man/models_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{models_put_transfer} +\alias{models_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +models_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/notebooks_get.Rd b/man/notebooks_get.Rd index 5203a642..834fe92d 100644 --- a/man/notebooks_get.Rd +++ b/man/notebooks_get.Rd @@ -45,22 +45,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. 
\item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_get_deployments.Rd b/man/notebooks_get_deployments.Rd index bc04f50d..eb36466c 100644 --- a/man/notebooks_get_deployments.Rd +++ b/man/notebooks_get_deployments.Rd @@ -21,13 +21,14 @@ A list containing the following elements: \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{displayUrl}{string, A signed URL for viewing the deployed item.} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{notebookId}{integer, The ID of owning Notebook} } \description{ diff --git a/man/notebooks_get_git_commits.Rd b/man/notebooks_get_git_commits.Rd index 29749708..c23bbf13 100644 --- a/man/notebooks_get_git_commits.Rd +++ b/man/notebooks_get_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{notebooks_get_git_commits} \alias{notebooks_get_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ notebooks_get_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/notebooks_list.Rd b/man/notebooks_list.Rd index 7260f6d8..6f3a08c2 100644 --- a/man/notebooks_list.Rd +++ b/man/notebooks_list.Rd @@ -20,7 +20,7 @@ notebooks_list( \item{archived}{string optional. The archival status of the requested item(s).} -\item{author}{string optional. If specified, return imports from this author. It accepts a comma-separated list of author IDs.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{status}{string optional. 
If specified, returns notebooks with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'pending', 'idle'.} @@ -57,13 +57,14 @@ An array containing the following fields: \item dockerImageName string, The name of the docker image to pull from DockerHub. \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{archived}{string, The archival status of the requested item(s).} diff --git a/man/notebooks_list_dependencies.Rd b/man/notebooks_list_dependencies.Rd new file mode 100644 index 00000000..44946ad6 --- /dev/null +++ b/man/notebooks_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{notebooks_list_dependencies} +\alias{notebooks_list_dependencies} +\title{List dependent objects for this object} +\usage{ +notebooks_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/notebooks_list_deployments.Rd b/man/notebooks_list_deployments.Rd index 12a5b83c..0b86f7e8 100644 --- a/man/notebooks_list_deployments.Rd +++ b/man/notebooks_list_deployments.Rd @@ -35,13 +35,14 @@ An array containing the following fields: \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{notebookId}{integer, The ID of owning Notebook} } \description{ diff --git a/man/notebooks_list_git.Rd b/man/notebooks_list_git.Rd index 2bee7075..2a4b51e3 100644 --- a/man/notebooks_list_git.Rd +++ b/man/notebooks_list_git.Rd @@ -11,7 +11,7 @@ notebooks_list_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/notebooks_list_git_commits.Rd b/man/notebooks_list_git_commits.Rd index 7210fc40..34fa5b7f 100644 --- a/man/notebooks_list_git_commits.Rd +++ b/man/notebooks_list_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{notebooks_list_git_commits} \alias{notebooks_list_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ notebooks_list_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/notebooks_patch.Rd b/man/notebooks_patch.Rd index 48e08a6f..cc9932a5 100644 --- a/man/notebooks_patch.Rd +++ b/man/notebooks_patch.Rd @@ -20,6 +20,7 @@ notebooks_patch( credentials = NULL, environment_variables = NULL, idle_timeout = NULL, + partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL @@ -56,6 +57,8 @@ notebooks_patch( \item{idle_timeout}{integer optional. How long the notebook will stay alive without any kernel activity.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{git_repo_url}{string optional. The url of the git repository} \item{git_ref}{string optional. The git reference if git repo is specified} @@ -98,22 +101,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_patch_git.Rd b/man/notebooks_patch_git.Rd new file mode 100644 index 00000000..0d129353 --- /dev/null +++ b/man/notebooks_patch_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{notebooks_patch_git} +\alias{notebooks_patch_git} +\title{Update an attached git file} +\usage{ +notebooks_patch_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/notebooks_post.Rd b/man/notebooks_post.Rd index 858bfe12..0760c47c 100644 --- a/man/notebooks_post.Rd +++ b/man/notebooks_post.Rd @@ -19,6 +19,7 @@ notebooks_post( credentials = NULL, environment_variables = NULL, idle_timeout = NULL, + partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL, @@ -54,6 +55,8 @@ notebooks_post( \item{idle_timeout}{integer optional. How long the notebook will stay alive without any kernel activity.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{git_repo_url}{string optional. The url of the git repository} \item{git_ref}{string optional. The git reference if git repo is specified} @@ -98,22 +101,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. 
\item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_post_clone.Rd b/man/notebooks_post_clone.Rd index e5ef0da9..ba0a85ab 100644 --- a/man/notebooks_post_clone.Rd +++ b/man/notebooks_post_clone.Rd @@ -45,22 +45,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_post_deployments.Rd b/man/notebooks_post_deployments.Rd index 1aecbda6..e78162ff 100644 --- a/man/notebooks_post_deployments.Rd +++ b/man/notebooks_post_deployments.Rd @@ -4,14 +4,12 @@ \alias{notebooks_post_deployments} \title{Deploy a Notebook} \usage{ -notebooks_post_deployments(notebook_id, deployment_id = NULL, published = NULL) +notebooks_post_deployments(notebook_id, deployment_id = NULL) } \arguments{ \item{notebook_id}{integer required. The ID of the owning Notebook} \item{deployment_id}{integer optional. The ID for this deployment} - -\item{published}{boolean optional.} } \value{ A list containing the following elements: @@ -23,13 +21,14 @@ A list containing the following elements: \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{displayUrl}{string, A signed URL for viewing the deployed item.} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{notebookId}{integer, The ID of owning Notebook} } \description{ diff --git a/man/notebooks_post_git_checkout.Rd b/man/notebooks_post_git_checkout.Rd new file mode 100644 index 00000000..3b54153c --- /dev/null +++ b/man/notebooks_post_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{notebooks_post_git_checkout} +\alias{notebooks_post_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +notebooks_post_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/notebooks_post_git_checkout_latest.Rd b/man/notebooks_post_git_checkout_latest.Rd new file mode 100644 index 00000000..6996e56f --- /dev/null +++ b/man/notebooks_post_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{notebooks_post_git_checkout_latest} +\alias{notebooks_post_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +notebooks_post_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. 
The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/notebooks_put.Rd b/man/notebooks_put.Rd index 62fddfb5..f9bc2fd8 100644 --- a/man/notebooks_put.Rd +++ b/man/notebooks_put.Rd @@ -20,6 +20,7 @@ notebooks_put( credentials = NULL, environment_variables = NULL, idle_timeout = NULL, + partition_label = NULL, git_repo_url = NULL, git_ref = NULL, git_path = NULL @@ -56,6 +57,8 @@ notebooks_put( \item{idle_timeout}{integer optional. How long the notebook will stay alive without any kernel activity.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{git_repo_url}{string optional. The url of the git repository} \item{git_ref}{string optional. The git reference if git repo is specified} @@ -98,22 +101,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_put_archive.Rd b/man/notebooks_put_archive.Rd index 6c02d8d3..10d560ad 100644 --- a/man/notebooks_put_archive.Rd +++ b/man/notebooks_put_archive.Rd @@ -47,22 +47,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. 
-\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item notebookId integer, The ID of owning Notebook }} \item{credentials}{array, A list of credential IDs to pass to the notebook.} \item{environmentVariables}{list, Environment variables to be passed into the Notebook.} \item{idleTimeout}{integer, How long the notebook will stay alive without any kernel activity.} +\item{partitionLabel}{string, The partition label used to run this object.} \item{gitRepoId}{integer, The ID of the git repository.} \item{gitRepoUrl}{string, The url of the git repository} \item{gitRef}{string, The git reference if git repo is specified} \item{gitPath}{string, The path to the .ipynb file in the git repo that will be started up on notebook launch} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/notebooks_put_git.Rd b/man/notebooks_put_git.Rd index 715685e2..75f7de56 100644 --- a/man/notebooks_put_git.Rd +++ b/man/notebooks_put_git.Rd @@ -10,6 +10,7 @@ notebooks_put_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ notebooks_put_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/notebooks_put_transfer.Rd b/man/notebooks_put_transfer.Rd new file mode 100644 index 00000000..eb0b05fb --- /dev/null +++ b/man/notebooks_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{notebooks_put_transfer} +\alias{notebooks_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +notebooks_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/permission_sets_delete_resources.Rd b/man/permission_sets_delete_resources.Rd new file mode 100644 index 00000000..acd36fa8 --- /dev/null +++ b/man/permission_sets_delete_resources.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_delete_resources} +\alias{permission_sets_delete_resources} +\title{Delete a resource in a permission set} +\usage{ +permission_sets_delete_resources(id, name) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} +} +\value{ +An empty HTTP response +} +\description{ +Delete a resource in a permission set +} diff --git a/man/permission_sets_delete_resources_shares_groups.Rd b/man/permission_sets_delete_resources_shares_groups.Rd new file mode 100644 index 00000000..72ee50a6 --- /dev/null +++ b/man/permission_sets_delete_resources_shares_groups.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_delete_resources_shares_groups} +\alias{permission_sets_delete_resources_shares_groups} +\title{Revoke the permissions a group has on this object} +\usage{ +permission_sets_delete_resources_shares_groups(id, name, group_id) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{group_id}{integer required. 
The ID of the group.} +} +\value{ +An empty HTTP response +} +\description{ +Revoke the permissions a group has on this object +} diff --git a/man/permission_sets_delete_resources_shares_users.Rd b/man/permission_sets_delete_resources_shares_users.Rd new file mode 100644 index 00000000..6c29274c --- /dev/null +++ b/man/permission_sets_delete_resources_shares_users.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_delete_resources_shares_users} +\alias{permission_sets_delete_resources_shares_users} +\title{Revoke the permissions a user has on this object} +\usage{ +permission_sets_delete_resources_shares_users(id, name, user_id) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{user_id}{integer required. The ID of the user.} +} +\value{ +An empty HTTP response +} +\description{ +Revoke the permissions a user has on this object +} diff --git a/man/apps_delete_releases_shares_groups.Rd b/man/permission_sets_delete_shares_groups.Rd similarity index 66% rename from man/apps_delete_releases_shares_groups.Rd rename to man/permission_sets_delete_shares_groups.Rd index e122ea75..3cc9412e 100644 --- a/man/apps_delete_releases_shares_groups.Rd +++ b/man/permission_sets_delete_shares_groups.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_delete_releases_shares_groups} -\alias{apps_delete_releases_shares_groups} +\name{permission_sets_delete_shares_groups} +\alias{permission_sets_delete_shares_groups} \title{Revoke the permissions a group has on this object} \usage{ -apps_delete_releases_shares_groups(slug, id, group_id) +permission_sets_delete_shares_groups(id, group_id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{group_id}{integer required. The ID of the group.} diff --git a/man/apps_delete_releases_shares_users.Rd b/man/permission_sets_delete_shares_users.Rd similarity index 66% rename from man/apps_delete_releases_shares_users.Rd rename to man/permission_sets_delete_shares_users.Rd index ad2f1155..b62086d2 100644 --- a/man/apps_delete_releases_shares_users.Rd +++ b/man/permission_sets_delete_shares_users.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_delete_releases_shares_users} -\alias{apps_delete_releases_shares_users} +\name{permission_sets_delete_shares_users} +\alias{permission_sets_delete_shares_users} \title{Revoke the permissions a user has on this object} \usage{ -apps_delete_releases_shares_users(slug, id, user_id) +permission_sets_delete_shares_users(id, user_id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{user_id}{integer required. 
The ID of the user.} diff --git a/man/permission_sets_get.Rd b/man/permission_sets_get.Rd new file mode 100644 index 00000000..4965072d --- /dev/null +++ b/man/permission_sets_get.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_get} +\alias{permission_sets_get} +\title{Get a Permission Set} +\usage{ +permission_sets_get(id) +} +\arguments{ +\item{id}{integer required.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Get a Permission Set +} diff --git a/man/permission_sets_get_resources.Rd b/man/permission_sets_get_resources.Rd new file mode 100644 index 00000000..0d313c67 --- /dev/null +++ b/man/permission_sets_get_resources.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_get_resources} +\alias{permission_sets_get_resources} +\title{Get a resource in a permission set} +\usage{ +permission_sets_get_resources(id, name) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} +} +\value{ +A list containing the following elements: +\item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +\item{name}{string, The name of this resource.} +\item{description}{string, A description of this resource.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Get a resource in a permission set +} diff --git a/man/permission_sets_list.Rd b/man/permission_sets_list.Rd new file mode 100644 index 00000000..543b4075 --- /dev/null +++ b/man/permission_sets_list.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_list} +\alias{permission_sets_list} +\title{List Permission Sets} +\usage{ +permission_sets_list( + archived = NULL, + author = NULL, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{archived}{string optional. The archival status of the requested item(s).} + +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + +\item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at.} + +\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} +} +\value{ +An array containing the following fields: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +List Permission Sets +} diff --git a/man/permission_sets_list_dependencies.Rd b/man/permission_sets_list_dependencies.Rd new file mode 100644 index 00000000..c8aa7e42 --- /dev/null +++ b/man/permission_sets_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_list_dependencies} +\alias{permission_sets_list_dependencies} +\title{List dependent objects for this object} +\usage{ +permission_sets_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/permission_sets_list_resources.Rd b/man/permission_sets_list_resources.Rd new file mode 100644 index 00000000..74f142db --- /dev/null +++ b/man/permission_sets_list_resources.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_list_resources} +\alias{permission_sets_list_resources} +\title{List resources in a permission set} +\usage{ +permission_sets_list_resources( + id, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to name. Must be one of: name, id, updated_at, created_at.} + +\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} +} +\value{ +An array containing the following fields: +\item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +\item{name}{string, The name of this resource.} +\item{description}{string, A description of this resource.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +List resources in a permission set +} diff --git a/man/permission_sets_list_resources_shares.Rd b/man/permission_sets_list_resources_shares.Rd new file mode 100644 index 00000000..831bcc3c --- /dev/null +++ b/man/permission_sets_list_resources_shares.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_list_resources_shares} +\alias{permission_sets_list_resources_shares} +\title{List users and groups permissioned on this object} +\usage{ +permission_sets_list_resources_shares(id, name) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} +} +\value{ +An array containing the following fields: +\item{readers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{writers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{owners}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +\item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +} +\description{ +List users and groups permissioned on this object +} diff --git a/man/apps_list_releases_shares.Rd b/man/permission_sets_list_shares.Rd similarity index 85% rename from man/apps_list_releases_shares.Rd rename to man/permission_sets_list_shares.Rd index c5886d83..fd2fc280 100644 --- a/man/apps_list_releases_shares.Rd +++ b/man/permission_sets_list_shares.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_list_releases_shares} -\alias{apps_list_releases_shares} +\name{permission_sets_list_shares} +\alias{permission_sets_list_shares} \title{List users and groups permissioned on this object} \usage{ -apps_list_releases_shares(slug, id) +permission_sets_list_shares(id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} } \value{ diff --git a/man/permission_sets_list_users_permissions.Rd b/man/permission_sets_list_users_permissions.Rd new file mode 100644 index 00000000..37e5ccb7 --- /dev/null +++ b/man/permission_sets_list_users_permissions.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_list_users_permissions} +\alias{permission_sets_list_users_permissions} +\title{Get all permissions for a user, in this permission set} +\usage{ +permission_sets_list_users_permissions(id, user_id) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{user_id}{integer required. 
The ID for the user.} +} +\value{ +An array containing the following fields: +\item{resourceName}{string, The name of the resource.} +\item{read}{boolean, If true, the user has read permission on this resource.} +\item{write}{boolean, If true, the user has write permission on this resource.} +\item{manage}{boolean, If true, the user has manage permission on this resource.} +} +\description{ +Get all permissions for a user, in this permission set +} diff --git a/man/permission_sets_patch.Rd b/man/permission_sets_patch.Rd new file mode 100644 index 00000000..892d0881 --- /dev/null +++ b/man/permission_sets_patch.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_patch} +\alias{permission_sets_patch} +\title{Update some attributes of this Permission Set} +\usage{ +permission_sets_patch(id, name = NULL, description = NULL) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string optional. The name of this permission set.} + +\item{description}{string optional. A description of this permission set.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Update some attributes of this Permission Set +} diff --git a/man/permission_sets_patch_resources.Rd b/man/permission_sets_patch_resources.Rd new file mode 100644 index 00000000..5278440b --- /dev/null +++ b/man/permission_sets_patch_resources.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_patch_resources} +\alias{permission_sets_patch_resources} +\title{Update a resource in a permission set} +\usage{ +permission_sets_patch_resources(id, name, description = NULL) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{description}{string optional. A description of this resource.} +} +\value{ +A list containing the following elements: +\item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +\item{name}{string, The name of this resource.} +\item{description}{string, A description of this resource.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Update a resource in a permission set +} diff --git a/man/permission_sets_post.Rd b/man/permission_sets_post.Rd new file mode 100644 index 00000000..bfdbd620 --- /dev/null +++ b/man/permission_sets_post.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_post} +\alias{permission_sets_post} +\title{Create a Permission Set} +\usage{ +permission_sets_post(name, description = NULL) +} +\arguments{ +\item{name}{string required. The name of this permission set.} + +\item{description}{string optional. 
A description of this permission set.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Create a Permission Set +} diff --git a/man/permission_sets_post_resources.Rd b/man/permission_sets_post_resources.Rd new file mode 100644 index 00000000..47086183 --- /dev/null +++ b/man/permission_sets_post_resources.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_post_resources} +\alias{permission_sets_post_resources} +\title{Create a resource in a permission set} +\usage{ +permission_sets_post_resources(id, name, description = NULL) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{description}{string optional. A description of this resource.} +} +\value{ +A list containing the following elements: +\item{permissionSetId}{integer, The ID for the permission set this resource belongs to.} +\item{name}{string, The name of this resource.} +\item{description}{string, A description of this resource.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Create a resource in a permission set +} diff --git a/man/permission_sets_put.Rd b/man/permission_sets_put.Rd new file mode 100644 index 00000000..26e45bf9 --- /dev/null +++ b/man/permission_sets_put.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_put} +\alias{permission_sets_put} +\title{Replace all attributes of this Permission Set} +\usage{ +permission_sets_put(id, name, description = NULL) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this permission set.} + +\item{description}{string optional. A description of this permission set.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. 
+}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Replace all attributes of this Permission Set +} diff --git a/man/permission_sets_put_archive.Rd b/man/permission_sets_put_archive.Rd new file mode 100644 index 00000000..1c2eaecd --- /dev/null +++ b/man/permission_sets_put_archive.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_put_archive} +\alias{permission_sets_put_archive} +\title{Update the archive status of this object} +\usage{ +permission_sets_put_archive(id, status) +} +\arguments{ +\item{id}{integer required. The ID of the object.} + +\item{status}{boolean required. The desired archived status of the object.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this permission set.} +\item{name}{string, The name of this permission set.} +\item{description}{string, A description of this permission set.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Update the archive status of this object +} diff --git a/man/permission_sets_put_resources_shares_groups.Rd b/man/permission_sets_put_resources_shares_groups.Rd new file mode 100644 index 00000000..cd6509f5 --- /dev/null +++ b/man/permission_sets_put_resources_shares_groups.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_put_resources_shares_groups} +\alias{permission_sets_put_resources_shares_groups} +\title{Set the permissions groups has on this object} +\usage{ +permission_sets_put_resources_shares_groups( + id, + name, + group_ids, + permission_level, + share_email_body = NULL, + send_shared_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{group_ids}{array required. An array of one or more group IDs.} + +\item{permission_level}{string required. Options are: "read", "write", or "manage".} + +\item{share_email_body}{string optional. Custom body text for e-mail sent on a share.} + +\item{send_shared_email}{boolean optional. Send email to the recipients of a share.} +} +\value{ +A list containing the following elements: +\item{readers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{writers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{owners}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +\item{totalGroupShares}{integer, For owners, the number of total groups shared. 
For writers and readers, the number of visible groups shared.} +} +\description{ +Set the permissions groups has on this object +} diff --git a/man/permission_sets_put_resources_shares_users.Rd b/man/permission_sets_put_resources_shares_users.Rd new file mode 100644 index 00000000..97dacc74 --- /dev/null +++ b/man/permission_sets_put_resources_shares_users.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_put_resources_shares_users} +\alias{permission_sets_put_resources_shares_users} +\title{Set the permissions users have on this object} +\usage{ +permission_sets_put_resources_shares_users( + id, + name, + user_ids, + permission_level, + share_email_body = NULL, + send_shared_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID for this permission set.} + +\item{name}{string required. The name of this resource.} + +\item{user_ids}{array required. An array of one or more user IDs.} + +\item{permission_level}{string required. Options are: "read", "write", or "manage".} + +\item{share_email_body}{string optional. Custom body text for e-mail sent on a share.} + +\item{send_shared_email}{boolean optional. Send email to the recipients of a share.} +} +\value{ +A list containing the following elements: +\item{readers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{writers}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{owners}{list, A list containing the following elements: +\itemize{ +\item users array, +\item groups array, +}} +\item{totalUserShares}{integer, For owners, the number of total users shared. For writers and readers, the number of visible users shared.} +\item{totalGroupShares}{integer, For owners, the number of total groups shared. For writers and readers, the number of visible groups shared.} +} +\description{ +Set the permissions users have on this object +} diff --git a/man/apps_put_releases_shares_groups.Rd b/man/permission_sets_put_shares_groups.Rd similarity index 88% rename from man/apps_put_releases_shares_groups.Rd rename to man/permission_sets_put_shares_groups.Rd index a52aefab..e7814488 100644 --- a/man/apps_put_releases_shares_groups.Rd +++ b/man/permission_sets_put_shares_groups.Rd @@ -1,11 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_put_releases_shares_groups} -\alias{apps_put_releases_shares_groups} +\name{permission_sets_put_shares_groups} +\alias{permission_sets_put_shares_groups} \title{Set the permissions groups has on this object} \usage{ -apps_put_releases_shares_groups( - slug, +permission_sets_put_shares_groups( id, group_ids, permission_level, @@ -14,8 +13,6 @@ apps_put_releases_shares_groups( ) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{group_ids}{array required. 
An array of one or more group IDs.} diff --git a/man/apps_put_releases_shares_users.Rd b/man/permission_sets_put_shares_users.Rd similarity index 88% rename from man/apps_put_releases_shares_users.Rd rename to man/permission_sets_put_shares_users.Rd index 627c5b4c..ae3c4d22 100644 --- a/man/apps_put_releases_shares_users.Rd +++ b/man/permission_sets_put_shares_users.Rd @@ -1,11 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_put_releases_shares_users} -\alias{apps_put_releases_shares_users} +\name{permission_sets_put_shares_users} +\alias{permission_sets_put_shares_users} \title{Set the permissions users have on this object} \usage{ -apps_put_releases_shares_users( - slug, +permission_sets_put_shares_users( id, user_ids, permission_level, @@ -14,8 +13,6 @@ apps_put_releases_shares_users( ) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{user_ids}{array required. An array of one or more user IDs.} diff --git a/man/permission_sets_put_transfer.Rd b/man/permission_sets_put_transfer.Rd new file mode 100644 index 00000000..b90150eb --- /dev/null +++ b/man/permission_sets_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{permission_sets_put_transfer} +\alias{permission_sets_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +permission_sets_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/predictions_get.Rd b/man/predictions_get.Rd index 7f788feb..8843f174 100644 --- a/man/predictions_get.Rd +++ b/man/predictions_get.Rd @@ -40,10 +40,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{limitingSQL}{string, A SQL WHERE clause used to scope the rows to be predicted.} \item{primaryKey}{array, The primary key or composite keys of the table being predicted.} diff --git a/man/predictions_list_schedules.Rd b/man/predictions_list_schedules.Rd index 3c513426..173221c8 100644 --- a/man/predictions_list_schedules.Rd +++ b/man/predictions_list_schedules.Rd @@ -15,10 +15,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{scoreOnModelBuild}{boolean, Whether the prediction will run after a rebuild of the associated model.} } diff --git a/man/projects_get.Rd b/man/projects_get.Rd index 6d7f3c04..68e76750 100644 --- a/man/projects_get.Rd +++ b/man/projects_get.Rd @@ -148,14 +148,6 @@ A list containing the following elements: \item name string, \item lastRun object, }} -\item{appInstances}{array, An array containing the following fields: -\itemize{ -\item id integer, The item's ID. -\item createdAt string, -\item updatedAt string, -\item name string, -\item slug string, -}} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The item's ID. @@ -175,10 +167,13 @@ A list containing the following elements: \item icon string, \item author string, \item updatedAt string, +\item autoShare boolean, \item archived string, The archival status of the requested item(s). \item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". }} \item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{parentProject}{list, A list containing the following elements: @@ -186,6 +181,7 @@ A list containing the following elements: \item id integer, The parent project's ID. \item name integer, The parent project's name. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get a detailed view of a project and the objects in it diff --git a/man/projects_list.Rd b/man/projects_list.Rd index 59b676a8..60df3e91 100644 --- a/man/projects_list.Rd +++ b/man/projects_list.Rd @@ -5,8 +5,9 @@ \title{List projects} \usage{ projects_list( - author = NULL, permission = NULL, + auto_share = NULL, + author = NULL, hidden = NULL, archived = NULL, limit = NULL, @@ -16,10 +17,12 @@ projects_list( ) } \arguments{ -\item{author}{string optional. 
If specified, return projects owned by this author. It accepts a comma-separated list of author ids.} - \item{permission}{string optional. A permissions string, one of "read", "write", or "manage". Lists only projects for which the current user has that permission.} +\item{auto_share}{boolean optional. Used to filter projects based on whether the project is autoshare enabled or not.} + +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} \item{archived}{string optional. The archival status of the requested item(s).} diff --git a/man/projects_list_dependencies.Rd b/man/projects_list_dependencies.Rd new file mode 100644 index 00000000..6f2b465e --- /dev/null +++ b/man/projects_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{projects_list_dependencies} +\alias{projects_list_dependencies} +\title{List dependent objects for this object} +\usage{ +projects_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/projects_post.Rd b/man/projects_post.Rd index 2cf2834f..bd09340a 100644 --- a/man/projects_post.Rd +++ b/man/projects_post.Rd @@ -4,7 +4,7 @@ \alias{projects_post} \title{Create a project} \usage{ -projects_post(name, description, note = NULL, hidden = NULL) +projects_post(name, description, note = NULL, auto_share = NULL, hidden = NULL) } \arguments{ \item{name}{string required. The name of this project.} @@ -13,6 +13,8 @@ projects_post(name, description, note = NULL, hidden = NULL) \item{note}{string optional. Notes for the project.} +\item{auto_share}{boolean optional. If true, objects within the project will be automatically shared when the project is shared or objects are added.} + \item{hidden}{boolean optional. The hidden status of the item.} } \value{ @@ -154,14 +156,6 @@ A list containing the following elements: \item name string, \item lastRun object, }} -\item{appInstances}{array, An array containing the following fields: -\itemize{ -\item id integer, The item's ID. -\item createdAt string, -\item updatedAt string, -\item name string, -\item slug string, -}} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The item's ID. @@ -181,10 +175,13 @@ A list containing the following elements: \item icon string, \item author string, \item updatedAt string, +\item autoShare boolean, \item archived string, The archival status of the requested item(s). \item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". 
}} \item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{parentProject}{list, A list containing the following elements: @@ -192,6 +189,7 @@ A list containing the following elements: \item id integer, The parent project's ID. \item name integer, The parent project's name. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Create a project diff --git a/man/projects_post_clone.Rd b/man/projects_post_clone.Rd new file mode 100644 index 00000000..30dd0778 --- /dev/null +++ b/man/projects_post_clone.Rd @@ -0,0 +1,192 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{projects_post_clone} +\alias{projects_post_clone} +\title{Clone this} +\usage{ +projects_post_clone(id, clone_schedule = NULL, clone_notifications = NULL) +} +\arguments{ +\item{id}{integer required. The ID for this project.} + +\item{clone_schedule}{boolean optional. If true, also copy the schedule for all applicable project objects.} + +\item{clone_notifications}{boolean optional. If true, also copy the notifications for all applicable project objects.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this project.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{name}{string, The name of this project.} +\item{description}{string, A description of the project.} +\item{users}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{autoShare}{boolean, } +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{tables}{array, An array containing the following fields: +\itemize{ +\item schema string, +\item name string, +\item rowCount integer, +\item columnCount integer, +\item createdAt string, +\item updatedAt string, +}} +\item{surveys}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +}} +\item{scripts}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{imports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{exports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. 
+\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{models}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +}} +\item{notebooks}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item currentDeploymentId integer, +\item lastDeploy object, +}} +\item{services}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item currentDeploymentId integer, +\item lastDeploy object, +}} +\item{workflows}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +\item lastExecution object, +}} +\item{reports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +}} +\item{scriptTemplates}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +}} +\item{files}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item fileName string, +\item fileSize integer, +\item expired boolean, +}} +\item{enhancements}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item lastRun object, +}} +\item{projects}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item description string, +}} +\item{allObjects}{array, An array containing the following fields: +\itemize{ +\item projectId integer, +\item objectId integer, +\item objectType string, +\item fcoType string, +\item subType string, +\item name string, +\item icon string, +\item author string, +\item updatedAt string, +\item autoShare boolean, +\item archived string, The archival status of the requested item(s). +\item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". +}} +\item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} +\item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} +\item{parentProject}{list, A list containing the following elements: +\itemize{ +\item id integer, The parent project's ID. +\item name integer, The parent project's name. +}} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +} +\description{ +Clone this +} diff --git a/man/projects_put.Rd b/man/projects_put.Rd index 40a1ca15..3091ff4e 100644 --- a/man/projects_put.Rd +++ b/man/projects_put.Rd @@ -4,13 +4,7 @@ \alias{projects_put} \title{Update a project} \usage{ -projects_put( - project_id, - name = NULL, - description = NULL, - note = NULL, - auto_share = NULL -) +projects_put(project_id, name = NULL, description = NULL, note = NULL) } \arguments{ \item{project_id}{integer required.} @@ -20,8 +14,6 @@ projects_put( \item{description}{string optional. A description of the project.} \item{note}{string optional. Notes for the project.} - -\item{auto_share}{boolean optional. A toggle for sharing the objects within the project when the project is shared.This does not automatically share new objects to the project.} } \value{ A list containing the following elements: @@ -162,14 +154,6 @@ A list containing the following elements: \item name string, \item lastRun object, }} -\item{appInstances}{array, An array containing the following fields: -\itemize{ -\item id integer, The item's ID. -\item createdAt string, -\item updatedAt string, -\item name string, -\item slug string, -}} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The item's ID. @@ -189,10 +173,13 @@ A list containing the following elements: \item icon string, \item author string, \item updatedAt string, +\item autoShare boolean, \item archived string, The archival status of the requested item(s). \item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". }} \item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{parentProject}{list, A list containing the following elements: @@ -200,6 +187,7 @@ A list containing the following elements: \item id integer, The parent project's ID. \item name integer, The parent project's name. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update a project diff --git a/man/projects_put_archive.Rd b/man/projects_put_archive.Rd index bafb712b..17054392 100644 --- a/man/projects_put_archive.Rd +++ b/man/projects_put_archive.Rd @@ -150,14 +150,6 @@ A list containing the following elements: \item name string, \item lastRun object, }} -\item{appInstances}{array, An array containing the following fields: -\itemize{ -\item id integer, The item's ID. -\item createdAt string, -\item updatedAt string, -\item name string, -\item slug string, -}} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The item's ID. @@ -177,10 +169,13 @@ A list containing the following elements: \item icon string, \item author string, \item updatedAt string, +\item autoShare boolean, \item archived string, The archival status of the requested item(s). \item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". 
}} \item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{parentProject}{list, A list containing the following elements: @@ -188,6 +183,7 @@ A list containing the following elements: \item id integer, The parent project's ID. \item name integer, The parent project's name. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update the archive status of this object diff --git a/man/projects_put_auto_share.Rd b/man/projects_put_auto_share.Rd new file mode 100644 index 00000000..0b8cc390 --- /dev/null +++ b/man/projects_put_auto_share.Rd @@ -0,0 +1,190 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{projects_put_auto_share} +\alias{projects_put_auto_share} +\title{Enable or disable Auto-Share on a project} +\usage{ +projects_put_auto_share(project_id, auto_share) +} +\arguments{ +\item{project_id}{integer required.} + +\item{auto_share}{boolean required. A toggle for sharing the objects within the project when the project is shared or objects are added.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID for this project.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{name}{string, The name of this project.} +\item{description}{string, A description of the project.} +\item{users}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{autoShare}{boolean, } +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{tables}{array, An array containing the following fields: +\itemize{ +\item schema string, +\item name string, +\item rowCount integer, +\item columnCount integer, +\item createdAt string, +\item updatedAt string, +}} +\item{surveys}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +}} +\item{scripts}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{imports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{exports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item type string, +\item finishedAt string, +\item state string, +\item lastRun object, +}} +\item{models}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. 
+\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +}} +\item{notebooks}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item currentDeploymentId integer, +\item lastDeploy object, +}} +\item{services}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item currentDeploymentId integer, +\item lastDeploy object, +}} +\item{workflows}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +\item lastExecution object, +}} +\item{reports}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item state string, +}} +\item{scriptTemplates}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +}} +\item{files}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item fileName string, +\item fileSize integer, +\item expired boolean, +}} +\item{enhancements}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item lastRun object, +}} +\item{projects}{array, An array containing the following fields: +\itemize{ +\item id integer, The item's ID. +\item createdAt string, +\item updatedAt string, +\item name string, +\item description string, +}} +\item{allObjects}{array, An array containing the following fields: +\itemize{ +\item projectId integer, +\item objectId integer, +\item objectType string, +\item fcoType string, +\item subType string, +\item name string, +\item icon string, +\item author string, +\item updatedAt string, +\item autoShare boolean, +\item archived string, The archival status of the requested item(s). +\item hidden boolean, The hidden status of the item. +\item myPermissionLevel string, Your permission level on the object. One of "read", "write", or "manage". +}} +\item{note}{string, } +\item{canCurrentUserEnableAutoShare}{boolean, A flag for if the current user can enable auto-sharing mode for this project.} +\item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} +\item{parentProject}{list, A list containing the following elements: +\itemize{ +\item id integer, The parent project's ID. +\item name integer, The parent project's name. +}} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +} +\description{ +Enable or disable Auto-Share on a project +} diff --git a/man/projects_put_transfer.Rd b/man/projects_put_transfer.Rd new file mode 100644 index 00000000..8f940ad6 --- /dev/null +++ b/man/projects_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{projects_put_transfer} +\alias{projects_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +projects_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/queries_delete.Rd b/man/queries_delete.Rd new file mode 100644 index 00000000..f08260c8 --- /dev/null +++ b/man/queries_delete.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{queries_delete} +\alias{queries_delete} +\title{Sets Query Hidden to true} +\usage{ +queries_delete(id) +} +\arguments{ +\item{id}{integer required. The query ID.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The query ID.} +\item{database}{integer, The database ID.} +\item{sql}{string, The SQL to execute.} +\item{credential}{integer, The credential ID.} +\item{resultRows}{array, A preview of rows returned by the query.} +\item{resultColumns}{array, A preview of columns returned by the query.} +\item{scriptId}{integer, The ID of the script associated with this query.} +\item{exception}{string, Deprecated and not used.} +\item{error}{string, The error message for this run, if present.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{startedAt}{string, The start time of the last run.} +\item{finishedAt}{string, The end time of the last run.} +\item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} +\item{lastRunId}{integer, The ID of the last run.} +\item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} +\item{name}{string, The name of the query.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. 
+}} +\item{reportId}{integer, The ID of the report associated with this query.} +} +\description{ +Sets Query Hidden to true +} diff --git a/man/queries_get.Rd b/man/queries_get.Rd index ab9e1242..1620fe78 100644 --- a/man/queries_get.Rd +++ b/man/queries_get.Rd @@ -22,10 +22,12 @@ A list containing the following elements: \item{error}{string, The error message for this run, if present.} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{startedAt}{string, The start time of the last run.} \item{finishedAt}{string, The end time of the last run.} -\item{state}{string, The state of the last run.} +\item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} \item{lastRunId}{integer, The ID of the last run.} \item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} \item{name}{string, The name of the query.} \item{author}{list, A list containing the following elements: \itemize{ @@ -35,7 +37,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{startedAt}{string, The start time of the last run.} \item{reportId}{integer, The ID of the report associated with this query.} } \description{ diff --git a/man/queries_get_runs.Rd b/man/queries_get_runs.Rd index c18c6939..f9dae5fc 100644 --- a/man/queries_get_runs.Rd +++ b/man/queries_get_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{queryId}{integer, The ID of the query.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/queries_list.Rd b/man/queries_list.Rd index 6124c9e6..1db568cf 100644 --- a/man/queries_list.Rd +++ b/man/queries_list.Rd @@ -2,14 +2,21 @@ % Please edit documentation in R/generated_client.R \name{queries_list} \alias{queries_list} -\title{List} +\title{List queries} \usage{ queries_list( + query = NULL, database_id = NULL, + credential_id = NULL, author_id = NULL, created_before = NULL, + created_after = NULL, + started_before = NULL, + started_after = NULL, + state = NULL, exclude_results = NULL, hidden = NULL, + archived = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -17,21 +24,35 @@ queries_list( ) } \arguments{ +\item{query}{string optional. Space delimited query for searching queries by their SQL. Supports wild card characters "?" for any single character, and "*" for zero or more characters.} + \item{database_id}{integer optional. The database ID.} +\item{credential_id}{integer optional. The credential ID.} + \item{author_id}{integer optional. The author of the query.} \item{created_before}{string optional. An upper bound for the creation date of the query.} +\item{created_after}{string optional. A lower bound for the creation date of the query.} + +\item{started_before}{string optional. An upper bound for the start date of the last run.} + +\item{started_after}{string optional. A lower bound for the start date of the last run.} + +\item{state}{array optional. 
The state of the last run. One or more of queued, running, succeeded, failed, and cancelled. Specify multiple values as a comma-separated list (e.g., "A,B").} + \item{exclude_results}{boolean optional. If true, does not return cached query results.} \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} +\item{archived}{string optional. The archival status of the requested item(s).} + \item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50.} \item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} -\item{order}{string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at.} +\item{order}{string optional. The field on which to order the result set. Defaults to created_at. Must be one of: created_at, started_at.} \item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} } @@ -48,13 +69,14 @@ An array containing the following fields: \item{error}{string, The error message for this run, if present.} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{startedAt}{string, The start time of the last run.} \item{finishedAt}{string, The end time of the last run.} -\item{state}{string, The state of the last run.} +\item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} \item{lastRunId}{integer, The ID of the last run.} +\item{archived}{string, The archival status of the requested item(s).} \item{previewRows}{integer, The number of rows to save from the query's result (maximum: 100).} -\item{startedAt}{string, The start time of the last run.} \item{reportId}{integer, The ID of the report associated with this query.} } \description{ -List +List queries } diff --git a/man/queries_list_runs.Rd b/man/queries_list_runs.Rd index 427084ea..c9707e71 100644 --- a/man/queries_list_runs.Rd +++ b/man/queries_list_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{queryId}{integer, The ID of the query.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/queries_post.Rd b/man/queries_post.Rd index dfd698f9..eb0d7ff4 100644 --- a/man/queries_post.Rd +++ b/man/queries_post.Rd @@ -54,10 +54,13 @@ A list containing the following elements: \item{error}{string, The error message for this run, if present.} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{startedAt}{string, The start time of the last run.} \item{finishedAt}{string, The end time of the last run.} -\item{state}{string, The state of the last run.} +\item{state}{string, The state of the last run. 
One of queued, running, succeeded, failed, and cancelled.} \item{lastRunId}{integer, The ID of the last run.} \item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{interactive}{boolean, Deprecated and not used.} \item{previewRows}{integer, The number of rows to save from the query's result (maximum: 100).} \item{includeHeader}{boolean, Whether the CSV output should include a header row [default: true].} @@ -65,7 +68,6 @@ A list containing the following elements: \item{columnDelimiter}{string, The delimiter to use. One of comma or tab, or pipe [default: comma].} \item{unquoted}{boolean, If true, will not quote fields.} \item{filenamePrefix}{string, The output filename prefix.} -\item{startedAt}{string, The start time of the last run.} \item{reportId}{integer, The ID of the report associated with this query.} } \description{ diff --git a/man/queries_post_runs.Rd b/man/queries_post_runs.Rd index 5f2988c5..b4e5f85d 100644 --- a/man/queries_post_runs.Rd +++ b/man/queries_post_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{queryId}{integer, The ID of the query.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/queries_put_scripts.Rd b/man/queries_put_scripts.Rd index bf0276e3..a8979a02 100644 --- a/man/queries_put_scripts.Rd +++ b/man/queries_put_scripts.Rd @@ -24,10 +24,12 @@ A list containing the following elements: \item{error}{string, The error message for this run, if present.} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{startedAt}{string, The start time of the last run.} \item{finishedAt}{string, The end time of the last run.} -\item{state}{string, The state of the last run.} +\item{state}{string, The state of the last run. One of queued, running, succeeded, failed, and cancelled.} \item{lastRunId}{integer, The ID of the last run.} \item{hidden}{boolean, The hidden status of the item.} +\item{archived}{string, The archival status of the requested item(s).} \item{name}{string, The name of the query.} \item{author}{list, A list containing the following elements: \itemize{ @@ -37,7 +39,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. 
}} -\item{startedAt}{string, The start time of the last run.} \item{reportId}{integer, The ID of the report associated with this query.} } \description{ diff --git a/man/apps_delete_instances_shares_groups.Rd b/man/remote_hosts_delete_shares_groups.Rd similarity index 66% rename from man/apps_delete_instances_shares_groups.Rd rename to man/remote_hosts_delete_shares_groups.Rd index 3f1e4a26..2f90e261 100644 --- a/man/apps_delete_instances_shares_groups.Rd +++ b/man/remote_hosts_delete_shares_groups.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_delete_instances_shares_groups} -\alias{apps_delete_instances_shares_groups} +\name{remote_hosts_delete_shares_groups} +\alias{remote_hosts_delete_shares_groups} \title{Revoke the permissions a group has on this object} \usage{ -apps_delete_instances_shares_groups(slug, id, group_id) +remote_hosts_delete_shares_groups(id, group_id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{group_id}{integer required. The ID of the group.} diff --git a/man/apps_delete_instances_shares_users.Rd b/man/remote_hosts_delete_shares_users.Rd similarity index 66% rename from man/apps_delete_instances_shares_users.Rd rename to man/remote_hosts_delete_shares_users.Rd index a11abe8b..bbbebadf 100644 --- a/man/apps_delete_instances_shares_users.Rd +++ b/man/remote_hosts_delete_shares_users.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_delete_instances_shares_users} -\alias{apps_delete_instances_shares_users} +\name{remote_hosts_delete_shares_users} +\alias{remote_hosts_delete_shares_users} \title{Revoke the permissions a user has on this object} \usage{ -apps_delete_instances_shares_users(slug, id, user_id) +remote_hosts_delete_shares_users(id, user_id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{user_id}{integer required. The ID of the user.} diff --git a/man/remote_hosts_get.Rd b/man/remote_hosts_get.Rd new file mode 100644 index 00000000..1d73bd96 --- /dev/null +++ b/man/remote_hosts_get.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{remote_hosts_get} +\alias{remote_hosts_get} +\title{Get a Remote Host} +\usage{ +remote_hosts_get(id) +} +\arguments{ +\item{id}{integer required.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the remote host.} +\item{name}{string, The human readable name for the remote host.} +\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{url}{string, The URL for the remote host.} +\item{description}{string, The description of the remote host.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. 
+\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Get a Remote Host +} diff --git a/man/remote_hosts_list.Rd b/man/remote_hosts_list.Rd index 2fcc2e3d..799e5114 100644 --- a/man/remote_hosts_list.Rd +++ b/man/remote_hosts_list.Rd @@ -2,20 +2,20 @@ % Please edit documentation in R/generated_client.R \name{remote_hosts_list} \alias{remote_hosts_list} -\title{List the remote hosts} +\title{List Remote Hosts} \usage{ remote_hosts_list(type = NULL) } \arguments{ -\item{type}{string optional. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{type}{string optional. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} } \value{ An array containing the following fields: \item{id}{integer, The ID of the remote host.} -\item{name}{string, The name of the remote host.} -\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} -\item{url}{string, The URL for remote host.} +\item{name}{string, The human readable name for the remote host.} +\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{url}{string, The URL for the remote host.} } \description{ -List the remote hosts +List Remote Hosts } diff --git a/man/apps_list_instances_shares.Rd b/man/remote_hosts_list_shares.Rd similarity index 85% rename from man/apps_list_instances_shares.Rd rename to man/remote_hosts_list_shares.Rd index db662a6d..4b869ad2 100644 --- a/man/apps_list_instances_shares.Rd +++ b/man/remote_hosts_list_shares.Rd @@ -1,14 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_list_instances_shares} -\alias{apps_list_instances_shares} +\name{remote_hosts_list_shares} +\alias{remote_hosts_list_shares} \title{List users and groups permissioned on this object} \usage{ -apps_list_instances_shares(slug, id) +remote_hosts_list_shares(id) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} } \value{ diff --git a/man/remote_hosts_patch.Rd b/man/remote_hosts_patch.Rd new file mode 100644 index 00000000..e236dad3 --- /dev/null +++ b/man/remote_hosts_patch.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{remote_hosts_patch} +\alias{remote_hosts_patch} +\title{Update some attributes of this Remote Host} +\usage{ +remote_hosts_patch( + id, + name = NULL, + type = NULL, + url = NULL, + description = NULL +) +} +\arguments{ +\item{id}{integer required. 
The ID of the remote host.} + +\item{name}{string optional. The human readable name for the remote host.} + +\item{type}{string optional. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} + +\item{url}{string optional. The URL for the remote host.} + +\item{description}{string optional. The description of the remote host.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the remote host.} +\item{name}{string, The human readable name for the remote host.} +\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{url}{string, The URL for the remote host.} +\item{description}{string, The description of the remote host.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Update some attributes of this Remote Host +} diff --git a/man/remote_hosts_post.Rd b/man/remote_hosts_post.Rd index 145a37e1..bb4d70ac 100644 --- a/man/remote_hosts_post.Rd +++ b/man/remote_hosts_post.Rd @@ -2,24 +2,36 @@ % Please edit documentation in R/generated_client.R \name{remote_hosts_post} \alias{remote_hosts_post} -\title{Create a new remote host} +\title{Create a Remote Host} \usage{ remote_hosts_post(name, url, type) } \arguments{ \item{name}{string required. The human readable name for the remote host.} -\item{url}{string required. The URL to your host.} +\item{url}{string required. The URL for the remote host.} -\item{type}{string required. The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{type}{string required. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} } \value{ A list containing the following elements: \item{id}{integer, The ID of the remote host.} -\item{name}{string, The name of the remote host.} -\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} -\item{url}{string, The URL for remote host.} +\item{name}{string, The human readable name for the remote host.} +\item{type}{string, The type of remote host. 
One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{url}{string, The URL for the remote host.} +\item{description}{string, The description of the remote host.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } } \description{ -Create a new remote host +Create a Remote Host } diff --git a/man/remote_hosts_put.Rd b/man/remote_hosts_put.Rd new file mode 100644 index 00000000..5d417ba9 --- /dev/null +++ b/man/remote_hosts_put.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{remote_hosts_put} +\alias{remote_hosts_put} +\title{Replace all attributes of this Remote Host} +\usage{ +remote_hosts_put(id, name, type, url, description) +} +\arguments{ +\item{id}{integer required. The ID of the remote host.} + +\item{name}{string required. The human readable name for the remote host.} + +\item{type}{string required. The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} + +\item{url}{string required. The URL for the remote host.} + +\item{description}{string required. The description of the remote host.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the remote host.} +\item{name}{string, The human readable name for the remote host.} +\item{type}{string, The type of remote host. One of: RemoteHostTypes::Bigquery, RemoteHostTypes::Bitbucket, RemoteHostTypes::GitSSH, RemoteHostTypes::Github, RemoteHostTypes::GoogleDoc, RemoteHostTypes::JDBC, RemoteHostTypes::Postgres, RemoteHostTypes::Redshift, RemoteHostTypes::S3Storage, and RemoteHostTypes::Salesforce} +\item{url}{string, The URL for the remote host.} +\item{description}{string, The description of the remote host.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. 
+}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Replace all attributes of this Remote Host +} diff --git a/man/apps_put_instances_shares_groups.Rd b/man/remote_hosts_put_shares_groups.Rd similarity index 88% rename from man/apps_put_instances_shares_groups.Rd rename to man/remote_hosts_put_shares_groups.Rd index 104e7cfc..7ba38313 100644 --- a/man/apps_put_instances_shares_groups.Rd +++ b/man/remote_hosts_put_shares_groups.Rd @@ -1,11 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_put_instances_shares_groups} -\alias{apps_put_instances_shares_groups} +\name{remote_hosts_put_shares_groups} +\alias{remote_hosts_put_shares_groups} \title{Set the permissions groups has on this object} \usage{ -apps_put_instances_shares_groups( - slug, +remote_hosts_put_shares_groups( id, group_ids, permission_level, @@ -14,8 +13,6 @@ apps_put_instances_shares_groups( ) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{group_ids}{array required. An array of one or more group IDs.} diff --git a/man/apps_put_instances_shares_users.Rd b/man/remote_hosts_put_shares_users.Rd similarity index 88% rename from man/apps_put_instances_shares_users.Rd rename to man/remote_hosts_put_shares_users.Rd index d94671ae..9240cd1a 100644 --- a/man/apps_put_instances_shares_users.Rd +++ b/man/remote_hosts_put_shares_users.Rd @@ -1,11 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R -\name{apps_put_instances_shares_users} -\alias{apps_put_instances_shares_users} +\name{remote_hosts_put_shares_users} +\alias{remote_hosts_put_shares_users} \title{Set the permissions users have on this object} \usage{ -apps_put_instances_shares_users( - slug, +remote_hosts_put_shares_users( id, user_ids, permission_level, @@ -14,8 +13,6 @@ apps_put_instances_shares_users( ) } \arguments{ -\item{slug}{string required. The slug for the application.} - \item{id}{integer required. The ID of the resource that is shared.} \item{user_ids}{array required. An array of one or more user IDs.} diff --git a/man/reports_get.Rd b/man/reports_get.Rd index c88c790e..13d2cb52 100644 --- a/man/reports_get.Rd +++ b/man/reports_get.Rd @@ -23,6 +23,7 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The ID for the project. 
diff --git a/man/reports_get_git_commits.Rd b/man/reports_get_git_commits.Rd index c8b5392e..6aeab907 100644 --- a/man/reports_get_git_commits.Rd +++ b/man/reports_get_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{reports_get_git_commits} \alias{reports_get_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ reports_get_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/reports_get_services.Rd b/man/reports_get_services.Rd index 927ad0b7..86a0ab09 100644 --- a/man/reports_get_services.Rd +++ b/man/reports_get_services.Rd @@ -23,12 +23,14 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{host}{string, The host for the service report} \item{displayUrl}{string, The URL to display the service report.} \item{serviceId}{integer, The id of the backing service} \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} \item{apiKey}{string, A Civis API key that can be used by this report.} \item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +\item{archived}{string, The archival status of the requested item(s).} } \description{ Show a single service report diff --git a/man/reports_list.Rd b/man/reports_list.Rd index 2d4219ba..1f56111a 100644 --- a/man/reports_list.Rd +++ b/man/reports_list.Rd @@ -6,8 +6,8 @@ \usage{ reports_list( type = NULL, - author = NULL, template_id = NULL, + author = NULL, hidden = NULL, archived = NULL, limit = NULL, @@ -19,10 +19,10 @@ reports_list( \arguments{ \item{type}{string optional. If specified, return report of these types. It accepts a comma-separated list, possible values are 'tableau' or 'other'.} -\item{author}{string optional. If specified, return reports from this author. It accepts a comma-separated list of author ids.} - \item{template_id}{integer optional. If specified, return reports using the provided Template.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} \item{archived}{string optional. The archival status of the requested item(s).} diff --git a/man/reports_list_dependencies.Rd b/man/reports_list_dependencies.Rd new file mode 100644 index 00000000..c42ec949 --- /dev/null +++ b/man/reports_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_list_dependencies} +\alias{reports_list_dependencies} +\title{List dependent objects for this object} +\usage{ +reports_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/reports_list_git.Rd b/man/reports_list_git.Rd index 64ed8235..f6711cf8 100644 --- a/man/reports_list_git.Rd +++ b/man/reports_list_git.Rd @@ -11,7 +11,7 @@ reports_list_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/reports_list_git_commits.Rd b/man/reports_list_git_commits.Rd index 85c76f08..079dce88 100644 --- a/man/reports_list_git_commits.Rd +++ b/man/reports_list_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{reports_list_git_commits} \alias{reports_list_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ reports_list_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/reports_list_services_dependencies.Rd b/man/reports_list_services_dependencies.Rd new file mode 100644 index 00000000..bf122fef --- /dev/null +++ b/man/reports_list_services_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_list_services_dependencies} +\alias{reports_list_services_dependencies} +\title{List dependent objects for this object} +\usage{ +reports_list_services_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/reports_patch.Rd b/man/reports_patch.Rd index c98b6cc1..667a939c 100644 --- a/man/reports_patch.Rd +++ b/man/reports_patch.Rd @@ -49,6 +49,7 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The ID for the project. diff --git a/man/reports_patch_git.Rd b/man/reports_patch_git.Rd new file mode 100644 index 00000000..b282b9cc --- /dev/null +++ b/man/reports_patch_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_patch_git} +\alias{reports_patch_git} +\title{Update an attached git file} +\usage{ +reports_patch_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/reports_patch_services.Rd b/man/reports_patch_services.Rd index b49fa639..70762d32 100644 --- a/man/reports_patch_services.Rd +++ b/man/reports_patch_services.Rd @@ -27,12 +27,14 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{host}{string, The host for the service report} \item{displayUrl}{string, The URL to display the service report.} \item{serviceId}{integer, The id of the backing service} \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} \item{apiKey}{string, A Civis API key that can be used by this report.} \item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +\item{archived}{string, The archival status of the requested item(s).} } \description{ Update some attributes of this service report diff --git a/man/reports_post.Rd b/man/reports_post.Rd index 5fec9b45..58ede643 100644 --- a/man/reports_post.Rd +++ b/man/reports_post.Rd @@ -43,6 +43,7 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The ID for the project. diff --git a/man/reports_post_git_checkout.Rd b/man/reports_post_git_checkout.Rd new file mode 100644 index 00000000..d59e9f77 --- /dev/null +++ b/man/reports_post_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_post_git_checkout} +\alias{reports_post_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +reports_post_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/reports_post_git_checkout_latest.Rd b/man/reports_post_git_checkout_latest.Rd new file mode 100644 index 00000000..2bb03c9b --- /dev/null +++ b/man/reports_post_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_post_git_checkout_latest} +\alias{reports_post_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +reports_post_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/reports_post_grants.Rd b/man/reports_post_grants.Rd index c3399d1d..ab97346a 100644 --- a/man/reports_post_grants.Rd +++ b/man/reports_post_grants.Rd @@ -23,6 +23,7 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The ID for the project. 
diff --git a/man/reports_post_services.Rd b/man/reports_post_services.Rd index c1928960..2b963994 100644 --- a/man/reports_post_services.Rd +++ b/man/reports_post_services.Rd @@ -25,12 +25,14 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{host}{string, The host for the service report} \item{displayUrl}{string, The URL to display the service report.} \item{serviceId}{integer, The id of the backing service} \item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} \item{apiKey}{string, A Civis API key that can be used by this report.} \item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +\item{archived}{string, The archival status of the requested item(s).} } \description{ Create a service report diff --git a/man/reports_put_archive.Rd b/man/reports_put_archive.Rd index a24d15df..617d1f58 100644 --- a/man/reports_put_archive.Rd +++ b/man/reports_put_archive.Rd @@ -25,6 +25,7 @@ A list containing the following elements: }} \item{createdAt}{string, } \item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{projects}{array, An array containing the following fields: \itemize{ \item id integer, The ID for the project. diff --git a/man/reports_put_git.Rd b/man/reports_put_git.Rd index 7f5a596d..f6c5b6b6 100644 --- a/man/reports_put_git.Rd +++ b/man/reports_put_git.Rd @@ -10,6 +10,7 @@ reports_put_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ reports_put_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/reports_put_services_archive.Rd b/man/reports_put_services_archive.Rd new file mode 100644 index 00000000..8f6e17f1 --- /dev/null +++ b/man/reports_put_services_archive.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_put_services_archive} +\alias{reports_put_services_archive} +\title{Update the archive status of this object} +\usage{ +reports_put_services_archive(id, status) +} +\arguments{ +\item{id}{integer required. The ID of the object.} + +\item{status}{boolean required. The desired archived status of the object.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this report.} +\item{name}{string, The name of the report.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{host}{string, The host for the service report} +\item{displayUrl}{string, The URL to display the service report.} +\item{serviceId}{integer, The id of the backing service} +\item{provideAPIKey}{boolean, Whether the report requests an API Key from the report viewer.} +\item{apiKey}{string, A Civis API key that can be used by this report.} +\item{apiKeyId}{integer, The ID of the API key. Can be used for auditing API use by this report.} +\item{archived}{string, The archival status of the requested item(s).} +} +\description{ +Update the archive status of this object +} diff --git a/man/reports_put_services_transfer.Rd b/man/reports_put_services_transfer.Rd new file mode 100644 index 00000000..da7d6d31 --- /dev/null +++ b/man/reports_put_services_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_put_services_transfer} +\alias{reports_put_services_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +reports_put_services_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/reports_put_transfer.Rd b/man/reports_put_transfer.Rd new file mode 100644 index 00000000..7f9a71a8 --- /dev/null +++ b/man/reports_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{reports_put_transfer} +\alias{reports_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +reports_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/roles_list.Rd b/man/roles_list.Rd new file mode 100644 index 00000000..63bb4c84 --- /dev/null +++ b/man/roles_list.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{roles_list} +\alias{roles_list} +\title{List Roles} +\usage{ +roles_list(limit = NULL, page_num = NULL, order = NULL, order_dir = NULL) +} +\arguments{ +\item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to id. Must be one of: id.} + +\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} +} +\value{ +An array containing the following fields: +\item{id}{integer, ID of the Role.} +\item{name}{string, The name of the Role.} +\item{slug}{string, The slug.} +\item{description}{string, The description of the Role.} +} +\description{ +List Roles +} diff --git a/man/scripts_get.Rd b/man/scripts_get.Rd index e4baf5be..c3c5af51 100644 --- a/man/scripts_get.Rd +++ b/man/scripts_get.Rd @@ -59,10 +59,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -96,6 +97,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} diff --git a/man/scripts_get_containers.Rd b/man/scripts_get_containers.Rd index 25fdef53..a28cdca0 100644 --- a/man/scripts_get_containers.Rd +++ b/man/scripts_get_containers.Rd @@ -12,6 +12,12 @@ scripts_get_containers(id) \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -59,10 +65,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -87,8 +94,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -110,9 +117,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ View a container diff --git a/man/scripts_get_containers_runs.Rd b/man/scripts_get_containers_runs.Rd index 75e3633f..665bfdb7 100644 --- a/man/scripts_get_containers_runs.Rd +++ b/man/scripts_get_containers_runs.Rd @@ -17,9 +17,12 @@ A list containing the following elements: \item{containerId}{integer, The ID of the container.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Check status of a run diff --git a/man/scripts_get_custom.Rd b/man/scripts_get_custom.Rd index a4b9b7ff..9e9815c5 100644 --- a/man/scripts_get_custom.Rd +++ b/man/scripts_get_custom.Rd @@ -12,6 +12,12 @@ scripts_get_custom(id) \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. 
+\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -59,10 +65,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -95,6 +102,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -107,6 +115,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} +\item{partitionLabel}{string, The partition label used to run this object. 
Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Get a Custom Script diff --git a/man/scripts_get_custom_runs.Rd b/man/scripts_get_custom_runs.Rd index 6c87b174..ba880bd0 100644 --- a/man/scripts_get_custom_runs.Rd +++ b/man/scripts_get_custom_runs.Rd @@ -17,9 +17,12 @@ A list containing the following elements: \item{customId}{integer, The ID of the custom.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. Only available if the backing script is a Python, R, or container script.} } \description{ Check status of a run diff --git a/man/scripts_get_javascript.Rd b/man/scripts_get_javascript.Rd index 32000781..582c9573 100644 --- a/man/scripts_get_javascript.Rd +++ b/man/scripts_get_javascript.Rd @@ -59,10 +59,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -96,12 +97,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Get a JavaScript Script diff --git a/man/scripts_get_javascript_git_commits.Rd b/man/scripts_get_javascript_git_commits.Rd index 47e9df1d..5c1b5229 100644 --- a/man/scripts_get_javascript_git_commits.Rd +++ b/man/scripts_get_javascript_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_get_javascript_git_commits} \alias{scripts_get_javascript_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ scripts_get_javascript_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/scripts_get_javascript_runs.Rd b/man/scripts_get_javascript_runs.Rd index d72c16f1..efccfd4f 100644 --- a/man/scripts_get_javascript_runs.Rd +++ b/man/scripts_get_javascript_runs.Rd @@ -17,8 +17,9 @@ A list containing the following elements: \item{javascriptId}{integer, The ID of the javascript.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/scripts_get_python3.Rd b/man/scripts_get_python3.Rd index 423b4089..afe614e5 100644 --- a/man/scripts_get_python3.Rd +++ b/man/scripts_get_python3.Rd @@ -59,10 +59,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -96,6 +97,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -109,6 +111,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Get a Python Script diff --git a/man/scripts_get_python3_git_commits.Rd b/man/scripts_get_python3_git_commits.Rd index 3d772ab3..d318ceff 100644 --- a/man/scripts_get_python3_git_commits.Rd +++ b/man/scripts_get_python3_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_get_python3_git_commits} \alias{scripts_get_python3_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ scripts_get_python3_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/scripts_get_python3_runs.Rd b/man/scripts_get_python3_runs.Rd index 226d14f9..9d975bbf 100644 --- a/man/scripts_get_python3_runs.Rd +++ b/man/scripts_get_python3_runs.Rd @@ -17,9 +17,12 @@ A list containing the following elements: \item{pythonId}{integer, The ID of the python.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Check status of a run diff --git a/man/scripts_get_r.Rd b/man/scripts_get_r.Rd index a635794a..aadf3700 100644 --- a/man/scripts_get_r.Rd +++ b/man/scripts_get_r.Rd @@ -59,10 +59,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. 
+\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -96,6 +97,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -109,6 +111,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Get an R Script diff --git a/man/scripts_get_r_git_commits.Rd b/man/scripts_get_r_git_commits.Rd index 5ffb1d05..2c0212af 100644 --- a/man/scripts_get_r_git_commits.Rd +++ b/man/scripts_get_r_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_get_r_git_commits} \alias{scripts_get_r_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ scripts_get_r_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/scripts_get_r_runs.Rd b/man/scripts_get_r_runs.Rd index 5bc98371..0af19cd1 100644 --- a/man/scripts_get_r_runs.Rd +++ b/man/scripts_get_r_runs.Rd @@ -17,9 +17,12 @@ A list containing the following elements: \item{rId}{integer, The ID of the r.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Check status of a run diff --git a/man/scripts_get_sql.Rd b/man/scripts_get_sql.Rd index b43a53e9..b08b3338 100644 --- a/man/scripts_get_sql.Rd +++ b/man/scripts_get_sql.Rd @@ -59,10 +59,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -96,6 +97,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -114,6 +116,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Get a SQL script diff --git a/man/scripts_get_sql_git_commits.Rd b/man/scripts_get_sql_git_commits.Rd index 6c1cabb0..1bbd485f 100644 --- a/man/scripts_get_sql_git_commits.Rd +++ b/man/scripts_get_sql_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_get_sql_git_commits} \alias{scripts_get_sql_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ scripts_get_sql_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/scripts_get_sql_runs.Rd b/man/scripts_get_sql_runs.Rd index 43d8ac69..a8e7ab60 100644 --- a/man/scripts_get_sql_runs.Rd +++ b/man/scripts_get_sql_runs.Rd @@ -13,19 +13,21 @@ scripts_get_sql_runs(id, run_id) } \value{ A list containing the following elements: -\item{id}{integer, The ID of this run.} -\item{sqlId}{integer, The ID of this sql.} -\item{state}{string, The state of this run.} +\item{id}{integer, The ID of the run.} +\item{sqlId}{integer, The ID of the sql.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started.} -\item{finishedAt}{string, The time that this run finished.} -\item{error}{string, The error message for this run, if present.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} \item{output}{array, An array containing the following fields: \itemize{ \item outputName string, The name of the output file. \item fileId integer, The unique ID of the output file. 
\item path string, The temporary link to download this output file, valid for 36 hours. }} +\item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} } \description{ Check status of a run diff --git a/man/scripts_list.Rd b/man/scripts_list.Rd index 071e3fb8..274ed9fd 100644 --- a/man/scripts_list.Rd +++ b/man/scripts_list.Rd @@ -22,7 +22,7 @@ scripts_list( \item{category}{string optional. A job category for filtering scripts. Must be one of script, import, export, and enhancement.} -\item{author}{string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{status}{string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} diff --git a/man/scripts_list_containers_dependencies.Rd b/man/scripts_list_containers_dependencies.Rd new file mode 100644 index 00000000..a373aa11 --- /dev/null +++ b/man/scripts_list_containers_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_containers_dependencies} +\alias{scripts_list_containers_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_containers_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_containers_runs.Rd b/man/scripts_list_containers_runs.Rd index 9bf9e8fc..bc1dd3f3 100644 --- a/man/scripts_list_containers_runs.Rd +++ b/man/scripts_list_containers_runs.Rd @@ -29,9 +29,12 @@ An array containing the following fields: \item{containerId}{integer, The ID of the container.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ List runs for the given container diff --git a/man/scripts_list_custom.Rd b/man/scripts_list_custom.Rd index 842cb865..6d0b5b4b 
100644 --- a/man/scripts_list_custom.Rd +++ b/man/scripts_list_custom.Rd @@ -19,7 +19,7 @@ scripts_list_custom( \arguments{ \item{from_template_id}{string optional. If specified, return scripts based on the template with this ID. Specify multiple IDs as a comma-separated list.} -\item{author}{string optional. If specified, return items from this author. Must use user IDs. A comma separated list of IDs is also accepted to return items from multiple authors.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{status}{string optional. If specified, returns items with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.} diff --git a/man/scripts_list_custom_dependencies.Rd b/man/scripts_list_custom_dependencies.Rd new file mode 100644 index 00000000..edb45140 --- /dev/null +++ b/man/scripts_list_custom_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_custom_dependencies} +\alias{scripts_list_custom_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_custom_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_custom_runs.Rd b/man/scripts_list_custom_runs.Rd index 03a3cc34..85d22431 100644 --- a/man/scripts_list_custom_runs.Rd +++ b/man/scripts_list_custom_runs.Rd @@ -29,9 +29,12 @@ An array containing the following fields: \item{customId}{integer, The ID of the custom.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. 
Only available if the backing script is a Python, R, or container script.} } \description{ List runs for the given custom diff --git a/man/scripts_list_javascript_dependencies.Rd b/man/scripts_list_javascript_dependencies.Rd new file mode 100644 index 00000000..0b6face1 --- /dev/null +++ b/man/scripts_list_javascript_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_javascript_dependencies} +\alias{scripts_list_javascript_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_javascript_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_javascript_git.Rd b/man/scripts_list_javascript_git.Rd index 72d8b1ba..40adb13a 100644 --- a/man/scripts_list_javascript_git.Rd +++ b/man/scripts_list_javascript_git.Rd @@ -11,7 +11,7 @@ scripts_list_javascript_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/scripts_list_javascript_git_commits.Rd b/man/scripts_list_javascript_git_commits.Rd index 6041507c..2d4dde7d 100644 --- a/man/scripts_list_javascript_git_commits.Rd +++ b/man/scripts_list_javascript_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_list_javascript_git_commits} \alias{scripts_list_javascript_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ scripts_list_javascript_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/scripts_list_javascript_runs.Rd b/man/scripts_list_javascript_runs.Rd index 29a4bb8c..eb9e4260 100644 --- a/man/scripts_list_javascript_runs.Rd +++ b/man/scripts_list_javascript_runs.Rd @@ -29,8 +29,9 @@ An array containing the following fields: \item{javascriptId}{integer, The ID of the javascript.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/scripts_list_python3_dependencies.Rd b/man/scripts_list_python3_dependencies.Rd new file mode 100644 index 00000000..c40decb2 --- /dev/null +++ b/man/scripts_list_python3_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_python3_dependencies} +\alias{scripts_list_python3_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_python3_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_python3_git.Rd b/man/scripts_list_python3_git.Rd index 2c58e26d..7baeda6c 100644 --- a/man/scripts_list_python3_git.Rd +++ b/man/scripts_list_python3_git.Rd @@ -11,7 +11,7 @@ scripts_list_python3_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/scripts_list_python3_git_commits.Rd b/man/scripts_list_python3_git_commits.Rd index c338ee14..e2a6906b 100644 --- a/man/scripts_list_python3_git_commits.Rd +++ b/man/scripts_list_python3_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_list_python3_git_commits} \alias{scripts_list_python3_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ scripts_list_python3_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/scripts_list_python3_runs.Rd b/man/scripts_list_python3_runs.Rd index fe3e2de2..e02464eb 100644 --- a/man/scripts_list_python3_runs.Rd +++ b/man/scripts_list_python3_runs.Rd @@ -29,9 +29,12 @@ An array containing the following fields: \item{pythonId}{integer, The ID of the python.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ List runs for the given python diff --git a/man/scripts_list_r_dependencies.Rd b/man/scripts_list_r_dependencies.Rd new file mode 100644 index 00000000..7412f02e --- /dev/null +++ b/man/scripts_list_r_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_r_dependencies} +\alias{scripts_list_r_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_r_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_r_git.Rd b/man/scripts_list_r_git.Rd index f57768c3..365e1b4f 100644 --- a/man/scripts_list_r_git.Rd +++ b/man/scripts_list_r_git.Rd @@ -11,7 +11,7 @@ scripts_list_r_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/scripts_list_r_git_commits.Rd b/man/scripts_list_r_git_commits.Rd index 5a1786a3..1dcb38f7 100644 --- a/man/scripts_list_r_git_commits.Rd +++ b/man/scripts_list_r_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_list_r_git_commits} \alias{scripts_list_r_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ scripts_list_r_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/scripts_list_r_runs.Rd b/man/scripts_list_r_runs.Rd index e0cc0ee4..a09524e5 100644 --- a/man/scripts_list_r_runs.Rd +++ b/man/scripts_list_r_runs.Rd @@ -29,9 +29,12 @@ An array containing the following fields: \item{rId}{integer, The ID of the r.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the 
run, in millicores.} } \description{ List runs for the given r diff --git a/man/scripts_list_sql_dependencies.Rd b/man/scripts_list_sql_dependencies.Rd new file mode 100644 index 00000000..0646d27c --- /dev/null +++ b/man/scripts_list_sql_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_list_sql_dependencies} +\alias{scripts_list_sql_dependencies} +\title{List dependent objects for this object} +\usage{ +scripts_list_sql_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/scripts_list_sql_git.Rd b/man/scripts_list_sql_git.Rd index 520838f2..32c611f1 100644 --- a/man/scripts_list_sql_git.Rd +++ b/man/scripts_list_sql_git.Rd @@ -11,7 +11,7 @@ scripts_list_sql_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/scripts_list_sql_git_commits.Rd b/man/scripts_list_sql_git_commits.Rd index 4540c5ed..6779cfe5 100644 --- a/man/scripts_list_sql_git_commits.Rd +++ b/man/scripts_list_sql_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{scripts_list_sql_git_commits} \alias{scripts_list_sql_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ scripts_list_sql_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/scripts_list_sql_runs.Rd b/man/scripts_list_sql_runs.Rd index e3fb5711..3dad98a9 100644 --- a/man/scripts_list_sql_runs.Rd +++ b/man/scripts_list_sql_runs.Rd @@ -25,19 +25,21 @@ scripts_list_sql_runs( } \value{ An array containing the following fields: -\item{id}{integer, The ID of this run.} -\item{sqlId}{integer, The ID of this sql.} -\item{state}{string, The state of this run.} +\item{id}{integer, The ID of the run.} +\item{sqlId}{integer, The ID of the sql.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started.} -\item{finishedAt}{string, The time that this run finished.} -\item{error}{string, The error message for this run, if present.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} \item{output}{array, An array containing the following fields: \itemize{ \item outputName string, The name of the output file. \item fileId integer, The unique ID of the output file. \item path string, The temporary link to download this output file, valid for 36 hours. }} +\item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} } \description{ List runs for the given sql diff --git a/man/scripts_patch.Rd b/man/scripts_patch.Rd index 12cb9db6..39ec371a 100644 --- a/man/scripts_patch.Rd +++ b/man/scripts_patch.Rd @@ -13,7 +13,8 @@ scripts_patch( template_script_id = NULL, schedule = NULL, notifications = NULL, - parent_id = NULL + parent_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -42,10 +43,11 @@ scripts_patch( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. 
A list containing the following elements: @@ -63,6 +65,8 @@ scripts_patch( }} \item{parent_id}{integer optional. The ID of the parent job that will trigger this script} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -114,10 +118,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -151,6 +156,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} diff --git a/man/scripts_patch_container_runs.Rd b/man/scripts_patch_container_runs.Rd new file mode 100644 index 00000000..7314d274 --- /dev/null +++ b/man/scripts_patch_container_runs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_container_runs} +\alias{scripts_patch_container_runs} +\title{Update the given run} +\usage{ +scripts_patch_container_runs(id, run_id, error = NULL) +} +\arguments{ +\item{id}{integer required. ID of the Job} + +\item{run_id}{integer required. ID of the Run} + +\item{error}{string optional. The error message to update} +} +\value{ +An empty HTTP response +} +\description{ +Update the given run +} diff --git a/man/scripts_patch_containers.Rd b/man/scripts_patch_containers.Rd index dde88a72..4a4062c0 100644 --- a/man/scripts_patch_containers.Rd +++ b/man/scripts_patch_containers.Rd @@ -24,7 +24,9 @@ scripts_patch_containers( instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, - target_project_id = NULL + partition_label = NULL, + target_project_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -53,10 +55,11 @@ scripts_patch_containers( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. 
+\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -75,10 +78,9 @@ scripts_patch_containers( \item{required_resources}{list optional. A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. -\item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. }} \item{repo_http_uri}{string optional. The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -101,11 +103,21 @@ scripts_patch_containers( \item{time_zone}{string optional. The time zone of this script.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -153,10 +165,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -181,8 +194,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). 
+\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -204,9 +217,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update a container diff --git a/man/scripts_patch_custom.Rd b/man/scripts_patch_custom.Rd index fc4a45fe..7edfa9ec 100644 --- a/man/scripts_patch_custom.Rd +++ b/man/scripts_patch_custom.Rd @@ -14,7 +14,10 @@ scripts_patch_custom( schedule = NULL, notifications = NULL, time_zone = NULL, - target_project_id = NULL + target_project_id = NULL, + required_resources = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -33,10 +36,11 @@ scripts_patch_custom( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -56,10 +60,27 @@ scripts_patch_custom( \item{time_zone}{string optional. The time zone of this script.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{required_resources}{list optional. A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} + +\item{partition_label}{string optional. The partition label used to run this object. Only applicable for jobs using Docker.} + +\item{running_as_id}{integer optional. 
The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -107,10 +128,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -143,6 +165,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -155,6 +178,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} +\item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update some attributes of this Custom Script diff --git a/man/scripts_patch_javascript.Rd b/man/scripts_patch_javascript.Rd index 5a8f394e..34ce08cb 100644 --- a/man/scripts_patch_javascript.Rd +++ b/man/scripts_patch_javascript.Rd @@ -18,7 +18,8 @@ scripts_patch_javascript( target_project_id = NULL, source = NULL, remote_host_id = NULL, - credential_id = NULL + credential_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -47,10 +48,11 @@ scripts_patch_javascript( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. 
-\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -78,6 +80,8 @@ scripts_patch_javascript( \item{remote_host_id}{integer optional. The remote host ID that this script will connect to.} \item{credential_id}{integer optional. The credential that this script will use.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -129,10 +133,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -166,12 +171,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update some attributes of this JavaScript Script diff --git a/man/scripts_patch_javascript_git.Rd b/man/scripts_patch_javascript_git.Rd new file mode 100644 index 00000000..f3a17d30 --- /dev/null +++ b/man/scripts_patch_javascript_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_javascript_git} +\alias{scripts_patch_javascript_git} +\title{Update an attached git file} +\usage{ +scripts_patch_javascript_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. 
A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/scripts_patch_javascript_runs.Rd b/man/scripts_patch_javascript_runs.Rd new file mode 100644 index 00000000..70dff567 --- /dev/null +++ b/man/scripts_patch_javascript_runs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_javascript_runs} +\alias{scripts_patch_javascript_runs} +\title{Update the given run} +\usage{ +scripts_patch_javascript_runs(id, run_id, error = NULL) +} +\arguments{ +\item{id}{integer required. ID of the Job} + +\item{run_id}{integer required. ID of the Run} + +\item{error}{string optional. The error message to update} +} +\value{ +An empty HTTP response +} +\description{ +Update the given run +} diff --git a/man/scripts_patch_python3.Rd b/man/scripts_patch_python3.Rd index c5b87253..93a07a1c 100644 --- a/man/scripts_patch_python3.Rd +++ b/man/scripts_patch_python3.Rd @@ -20,7 +20,9 @@ scripts_patch_python3( instance_type = NULL, source = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -49,10 +51,11 @@ scripts_patch_python3( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_patch_python3( \item{cancel_timeout}{integer optional. 
The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update some attributes of this Python Script diff --git a/man/scripts_patch_python3_git.Rd b/man/scripts_patch_python3_git.Rd new file mode 100644 index 00000000..39dddde6 --- /dev/null +++ b/man/scripts_patch_python3_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_python3_git} +\alias{scripts_patch_python3_git} +\title{Update an attached git file} +\usage{ +scripts_patch_python3_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. 
The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/scripts_patch_python3_runs.Rd b/man/scripts_patch_python3_runs.Rd new file mode 100644 index 00000000..c9c54857 --- /dev/null +++ b/man/scripts_patch_python3_runs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_python3_runs} +\alias{scripts_patch_python3_runs} +\title{Update the given run} +\usage{ +scripts_patch_python3_runs(id, run_id, error = NULL) +} +\arguments{ +\item{id}{integer required. ID of the Job} + +\item{run_id}{integer required. ID of the Run} + +\item{error}{string optional. The error message to update} +} +\value{ +An empty HTTP response +} +\description{ +Update the given run +} diff --git a/man/scripts_patch_r.Rd b/man/scripts_patch_r.Rd index c131cd49..bfc23cb3 100644 --- a/man/scripts_patch_r.Rd +++ b/man/scripts_patch_r.Rd @@ -20,7 +20,9 @@ scripts_patch_r( instance_type = NULL, source = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -49,10 +51,11 @@ scripts_patch_r( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_patch_r( \item{cancel_timeout}{integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. 
Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update some attributes of this R Script diff --git a/man/scripts_patch_r_git.Rd b/man/scripts_patch_r_git.Rd new file mode 100644 index 00000000..f0943a24 --- /dev/null +++ b/man/scripts_patch_r_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_r_git} +\alias{scripts_patch_r_git} +\title{Update an attached git file} +\usage{ +scripts_patch_r_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. 
Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/scripts_patch_r_runs.Rd b/man/scripts_patch_r_runs.Rd new file mode 100644 index 00000000..5d53cdc0 --- /dev/null +++ b/man/scripts_patch_r_runs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_r_runs} +\alias{scripts_patch_r_runs} +\title{Update the given run} +\usage{ +scripts_patch_r_runs(id, run_id, error = NULL) +} +\arguments{ +\item{id}{integer required. ID of the Job} + +\item{run_id}{integer required. ID of the Run} + +\item{error}{string optional. The error message to update} +} +\value{ +An empty HTTP response +} +\description{ +Update the given run +} diff --git a/man/scripts_patch_sql.Rd b/man/scripts_patch_sql.Rd index 18f08d02..70bda7c6 100644 --- a/man/scripts_patch_sql.Rd +++ b/man/scripts_patch_sql.Rd @@ -19,7 +19,8 @@ scripts_patch_sql( sql = NULL, remote_host_id = NULL, credential_id = NULL, - csv_settings = NULL + csv_settings = NULL, + running_as_id = NULL ) } \arguments{ @@ -48,10 +49,11 @@ scripts_patch_sql( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -90,6 +92,8 @@ scripts_patch_sql( \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -141,10 +145,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. 
+\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -178,6 +183,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -196,6 +202,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update some attributes of this SQL script diff --git a/man/scripts_patch_sql_git.Rd b/man/scripts_patch_sql_git.Rd new file mode 100644 index 00000000..961485e0 --- /dev/null +++ b/man/scripts_patch_sql_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_sql_git} +\alias{scripts_patch_sql_git} +\title{Update an attached git file} +\usage{ +scripts_patch_sql_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/scripts_patch_sql_runs.Rd b/man/scripts_patch_sql_runs.Rd new file mode 100644 index 00000000..99c874a2 --- /dev/null +++ b/man/scripts_patch_sql_runs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_patch_sql_runs} +\alias{scripts_patch_sql_runs} +\title{Update the given run} +\usage{ +scripts_patch_sql_runs(id, run_id, error = NULL) +} +\arguments{ +\item{id}{integer required. ID of the Job} + +\item{run_id}{integer required. ID of the Run} + +\item{error}{string optional. The error message to update} +} +\value{ +An empty HTTP response +} +\description{ +Update the given run +} diff --git a/man/scripts_post.Rd b/man/scripts_post.Rd index b26602a0..618a4fed 100644 --- a/man/scripts_post.Rd +++ b/man/scripts_post.Rd @@ -107,10 +107,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -144,6 +145,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} diff --git a/man/scripts_post_containers.Rd b/man/scripts_post_containers.Rd index bf100967..0b86dc79 100644 --- a/man/scripts_post_containers.Rd +++ b/man/scripts_post_containers.Rd @@ -23,17 +23,18 @@ scripts_post_containers( instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, + partition_label = NULL, hidden = NULL, - target_project_id = NULL + target_project_id = NULL, + running_as_id = NULL ) } \arguments{ \item{required_resources}{list required. A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. 
Fractional values (e.g. 0.25) are supported. -\item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. }} \item{docker_image_name}{string required. The name of the docker image to pull from DockerHub.} @@ -61,10 +62,11 @@ scripts_post_containers( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -99,13 +101,23 @@ scripts_post_containers( \item{time_zone}{string optional. The time zone of this script.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{hidden}{boolean optional. The hidden status of the item.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -153,10 +165,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -181,8 +194,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. 
+\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -204,9 +217,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create a container diff --git a/man/scripts_post_containers_clone.Rd b/man/scripts_post_containers_clone.Rd index 822fe73b..4f218e65 100644 --- a/man/scripts_post_containers_clone.Rd +++ b/man/scripts_post_containers_clone.Rd @@ -23,6 +23,12 @@ scripts_post_containers_clone( \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -70,10 +76,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -98,8 +105,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. 
This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -121,9 +128,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this Container Script diff --git a/man/scripts_post_containers_runs.Rd b/man/scripts_post_containers_runs.Rd index c6588b8f..b66064be 100644 --- a/man/scripts_post_containers_runs.Rd +++ b/man/scripts_post_containers_runs.Rd @@ -15,9 +15,12 @@ A list containing the following elements: \item{containerId}{integer, The ID of the container.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Start a run diff --git a/man/scripts_post_custom.Rd b/man/scripts_post_custom.Rd index 5bb634cd..3e857049 100644 --- a/man/scripts_post_custom.Rd +++ b/man/scripts_post_custom.Rd @@ -15,7 +15,10 @@ scripts_post_custom( notifications = NULL, time_zone = NULL, hidden = NULL, - target_project_id = NULL + target_project_id = NULL, + required_resources = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -34,10 +37,11 @@ scripts_post_custom( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -59,10 +63,27 @@ scripts_post_custom( \item{hidden}{boolean optional. 
The hidden status of the item.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{required_resources}{list optional. A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} + +\item{partition_label}{string optional. The partition label used to run this object. Only applicable for jobs using Docker.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -110,10 +131,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -146,6 +168,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -158,6 +181,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. 
+}} +\item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create a Custom Script diff --git a/man/scripts_post_custom_clone.Rd b/man/scripts_post_custom_clone.Rd index c8449500..53aac10f 100644 --- a/man/scripts_post_custom_clone.Rd +++ b/man/scripts_post_custom_clone.Rd @@ -23,6 +23,12 @@ scripts_post_custom_clone( \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. +\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -70,10 +76,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -106,6 +113,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -118,6 +126,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} +\item{partitionLabel}{string, The partition label used to run this object. 
Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this Custom Script diff --git a/man/scripts_post_custom_runs.Rd b/man/scripts_post_custom_runs.Rd index 0d9f6dd7..862857bf 100644 --- a/man/scripts_post_custom_runs.Rd +++ b/man/scripts_post_custom_runs.Rd @@ -15,9 +15,12 @@ A list containing the following elements: \item{customId}{integer, The ID of the custom.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB. Only available if the backing script is a Python, R, or container script.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores. Only available if the backing script is a Python, R, or container script.} } \description{ Start a run diff --git a/man/scripts_post_javascript.Rd b/man/scripts_post_javascript.Rd index 9799963e..a50199a2 100644 --- a/man/scripts_post_javascript.Rd +++ b/man/scripts_post_javascript.Rd @@ -18,7 +18,8 @@ scripts_post_javascript( next_run_at = NULL, time_zone = NULL, hidden = NULL, - target_project_id = NULL + target_project_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -51,10 +52,11 @@ scripts_post_javascript( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -78,6 +80,8 @@ scripts_post_javascript( \item{hidden}{boolean optional. The hidden status of the item.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -129,10 +133,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -166,12 +171,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create a JavaScript Script diff --git a/man/scripts_post_javascript_clone.Rd b/man/scripts_post_javascript_clone.Rd index 7b48ceea..a1462d64 100644 --- a/man/scripts_post_javascript_clone.Rd +++ b/man/scripts_post_javascript_clone.Rd @@ -70,10 +70,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -107,12 +108,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this JavaScript Script diff --git a/man/scripts_post_javascript_git_checkout.Rd b/man/scripts_post_javascript_git_checkout.Rd new file mode 100644 index 00000000..d9b76d83 --- /dev/null +++ b/man/scripts_post_javascript_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_javascript_git_checkout} +\alias{scripts_post_javascript_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +scripts_post_javascript_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/scripts_post_javascript_git_checkout_latest.Rd b/man/scripts_post_javascript_git_checkout_latest.Rd new file mode 100644 index 00000000..9c4a508a --- /dev/null +++ b/man/scripts_post_javascript_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_javascript_git_checkout_latest} +\alias{scripts_post_javascript_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +scripts_post_javascript_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. 
The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/scripts_post_javascript_runs.Rd b/man/scripts_post_javascript_runs.Rd index 90b8d25f..e07b3fcf 100644 --- a/man/scripts_post_javascript_runs.Rd +++ b/man/scripts_post_javascript_runs.Rd @@ -15,8 +15,9 @@ A list containing the following elements: \item{javascriptId}{integer, The ID of the javascript.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} } \description{ diff --git a/man/scripts_post_python3.Rd b/man/scripts_post_python3.Rd index 2877b946..e79a1a04 100644 --- a/man/scripts_post_python3.Rd +++ b/man/scripts_post_python3.Rd @@ -20,7 +20,9 @@ scripts_post_python3( required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -49,10 +51,11 @@ scripts_post_python3( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_post_python3( \item{cancel_timeout}{integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create a Python Script diff --git a/man/scripts_post_python3_clone.Rd b/man/scripts_post_python3_clone.Rd index c8b443df..92abea83 100644 --- a/man/scripts_post_python3_clone.Rd +++ b/man/scripts_post_python3_clone.Rd @@ -70,10 +70,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -107,6 +108,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -120,6 +122,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this Python Script diff --git a/man/scripts_post_python3_git_checkout.Rd b/man/scripts_post_python3_git_checkout.Rd new file mode 100644 index 00000000..d2b3eca3 --- /dev/null +++ b/man/scripts_post_python3_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_python3_git_checkout} +\alias{scripts_post_python3_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +scripts_post_python3_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/scripts_post_python3_git_checkout_latest.Rd b/man/scripts_post_python3_git_checkout_latest.Rd new file mode 100644 index 00000000..033ca49a --- /dev/null +++ b/man/scripts_post_python3_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_python3_git_checkout_latest} +\alias{scripts_post_python3_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +scripts_post_python3_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. 
The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/scripts_post_python3_runs.Rd b/man/scripts_post_python3_runs.Rd index 520bc266..5102ce58 100644 --- a/man/scripts_post_python3_runs.Rd +++ b/man/scripts_post_python3_runs.Rd @@ -15,9 +15,12 @@ A list containing the following elements: \item{pythonId}{integer, The ID of the python.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Start a run diff --git a/man/scripts_post_r.Rd b/man/scripts_post_r.Rd index 072d02b5..5ce475ab 100644 --- a/man/scripts_post_r.Rd +++ b/man/scripts_post_r.Rd @@ -20,7 +20,9 @@ scripts_post_r( required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -49,10 +51,11 @@ scripts_post_r( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_post_r( \item{cancel_timeout}{integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. 
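Similarly, a rough sketch of the new checkout endpoints, shown here for the Python variants (the R, SQL, and JavaScript variants documented in this patch take the same single id argument); the script ID is hypothetical and an authenticated session is assumed:

library(civis)

# Re-read the file contents at the git_ref the script currently points to
chk <- scripts_post_python3_git_checkout(id = 1234)
chk$fileHash

# Fast-forward the script to the latest commit on its current branch
latest <- scripts_post_python3_git_checkout_latest(id = 1234)
latest$content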
-\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create an R Script diff --git a/man/scripts_post_r_clone.Rd b/man/scripts_post_r_clone.Rd index fc371905..a7e5c357 100644 --- a/man/scripts_post_r_clone.Rd +++ b/man/scripts_post_r_clone.Rd @@ -70,10 +70,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -107,6 +108,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -120,6 +122,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this R Script diff --git a/man/scripts_post_r_git_checkout.Rd b/man/scripts_post_r_git_checkout.Rd new file mode 100644 index 00000000..064aeb37 --- /dev/null +++ b/man/scripts_post_r_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_r_git_checkout} +\alias{scripts_post_r_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +scripts_post_r_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/scripts_post_r_git_checkout_latest.Rd b/man/scripts_post_r_git_checkout_latest.Rd new file mode 100644 index 00000000..ff0dae32 --- /dev/null +++ b/man/scripts_post_r_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_r_git_checkout_latest} +\alias{scripts_post_r_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +scripts_post_r_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. 
The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/scripts_post_r_runs.Rd b/man/scripts_post_r_runs.Rd index f682e90f..54e64654 100644 --- a/man/scripts_post_r_runs.Rd +++ b/man/scripts_post_r_runs.Rd @@ -15,9 +15,12 @@ A list containing the following elements: \item{rId}{integer, The ID of the r.} \item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started at.} -\item{finishedAt}{string, The time the last run completed.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} \item{error}{string, The error, if any, returned by the run.} +\item{maxMemoryUsage}{number, If the run has finished, the maximum amount of memory used during the run, in MB.} +\item{maxCpuUsage}{number, If the run has finished, the maximum amount of cpu used during the run, in millicores.} } \description{ Start a run diff --git a/man/scripts_post_sql.Rd b/man/scripts_post_sql.Rd index 17ea1f33..0b9acd89 100644 --- a/man/scripts_post_sql.Rd +++ b/man/scripts_post_sql.Rd @@ -19,7 +19,8 @@ scripts_post_sql( time_zone = NULL, hidden = NULL, target_project_id = NULL, - csv_settings = NULL + csv_settings = NULL, + running_as_id = NULL ) } \arguments{ @@ -52,10 +53,11 @@ scripts_post_sql( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -90,6 +92,8 @@ scripts_post_sql( \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -141,10 +145,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. 
\item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -178,6 +183,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -196,6 +202,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Create a SQL script diff --git a/man/scripts_post_sql_clone.Rd b/man/scripts_post_sql_clone.Rd index 3e733e6e..12fb4e66 100644 --- a/man/scripts_post_sql_clone.Rd +++ b/man/scripts_post_sql_clone.Rd @@ -70,10 +70,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -107,6 +108,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -125,6 +127,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. 
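Because scheduledRunsPerHour is now deprecated in favor of explicit minutes, a schedule passed to these functions would typically spell out scheduledMinutes instead. A minimal sketch, assuming a hypothetical SQL script ID and that the schedule list uses the camelCase element names listed above:

library(civis)

# Run at 9:00 and 9:30 on Mondays and Wednesdays (day 0 = Sunday)
scripts_patch_sql(
  id = 1234,
  schedule = list(
    scheduled = TRUE,
    scheduledDays = list(1, 3),
    scheduledHours = list(9),
    scheduledMinutes = list(0, 30)
  )
)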
}} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Clone this SQL script diff --git a/man/scripts_post_sql_git_checkout.Rd b/man/scripts_post_sql_git_checkout.Rd new file mode 100644 index 00000000..fd866b69 --- /dev/null +++ b/man/scripts_post_sql_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_sql_git_checkout} +\alias{scripts_post_sql_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +scripts_post_sql_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/scripts_post_sql_git_checkout_latest.Rd b/man/scripts_post_sql_git_checkout_latest.Rd new file mode 100644 index 00000000..e95656a9 --- /dev/null +++ b/man/scripts_post_sql_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_post_sql_git_checkout_latest} +\alias{scripts_post_sql_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +scripts_post_sql_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/scripts_post_sql_runs.Rd b/man/scripts_post_sql_runs.Rd index 7b7d2573..e1ef8c3c 100644 --- a/man/scripts_post_sql_runs.Rd +++ b/man/scripts_post_sql_runs.Rd @@ -11,19 +11,21 @@ scripts_post_sql_runs(id) } \value{ A list containing the following elements: -\item{id}{integer, The ID of this run.} -\item{sqlId}{integer, The ID of this sql.} -\item{state}{string, The state of this run.} +\item{id}{integer, The ID of the run.} +\item{sqlId}{integer, The ID of the sql.} +\item{state}{string, The state of the run, one of 'queued' 'running' 'succeeded' 'failed' or 'cancelled'.} \item{isCancelRequested}{boolean, True if run cancel requested, else false.} -\item{startedAt}{string, The time the last run started.} -\item{finishedAt}{string, The time that this run finished.} -\item{error}{string, The error message for this run, if present.} +\item{createdAt}{string, The time the run was created.} +\item{startedAt}{string, The time the run started at.} +\item{finishedAt}{string, The time the run completed.} +\item{error}{string, The error, if any, returned by the run.} \item{output}{array, An array containing the following fields: \itemize{ \item outputName string, The name of the output file. \item fileId integer, The unique ID of the output file. \item path string, The temporary link to download this output file, valid for 36 hours. 
}} +\item{outputCachedOn}{string, The time that the output was originally exported, if a cache entry was used by the run.} } \description{ Start a run diff --git a/man/scripts_put_containers.Rd b/man/scripts_put_containers.Rd index b596a211..740afb5c 100644 --- a/man/scripts_put_containers.Rd +++ b/man/scripts_put_containers.Rd @@ -24,7 +24,9 @@ scripts_put_containers( instance_type = NULL, cancel_timeout = NULL, time_zone = NULL, - target_project_id = NULL + partition_label = NULL, + target_project_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -32,10 +34,9 @@ scripts_put_containers( \item{required_resources}{list required. A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. -\item wholeInstance boolean, Whether or not to use the entire instance. If true, cpu, memory, and disk space are not required and will be set to an instance's max. }} \item{docker_image_name}{string required. The name of the docker image to pull from DockerHub.} @@ -63,10 +64,11 @@ scripts_put_containers( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -101,11 +103,21 @@ scripts_put_containers( \item{time_zone}{string optional. The time zone of this script.} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. 
+\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -153,10 +165,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -181,8 +194,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -204,9 +217,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Edit a container diff --git a/man/scripts_put_containers_archive.Rd b/man/scripts_put_containers_archive.Rd index f8f603f1..a4d0d229 100644 --- a/man/scripts_put_containers_archive.Rd +++ b/man/scripts_put_containers_archive.Rd @@ -14,6 +14,12 @@ scripts_put_containers_archive(id, status) \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. 
+\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the container.} \item{type}{string, The type of the script (e.g Container)} \item{createdAt}{string, The time this script was created.} @@ -61,10 +67,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -89,8 +96,8 @@ A list containing the following elements: }} \item{requiredResources}{list, A list containing the following elements: \itemize{ -\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. -\item memory integer, The amount of RAM to allocate for the container (in MB). +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. Must be at least 2 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). Must be at least 4 MB. \item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. }} \item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.} @@ -112,9 +119,12 @@ A list containing the following elements: \item error string, The error message for this run, if present. }} \item{timeZone}{string, The time zone of this script.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_containers_transfer.Rd b/man/scripts_put_containers_transfer.Rd new file mode 100644 index 00000000..07b223c3 --- /dev/null +++ b/man/scripts_put_containers_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_containers_transfer} +\alias{scripts_put_containers_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_containers_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. 
ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/scripts_put_custom.Rd b/man/scripts_put_custom.Rd index c86aeac4..8cdbd0d8 100644 --- a/man/scripts_put_custom.Rd +++ b/man/scripts_put_custom.Rd @@ -14,7 +14,10 @@ scripts_put_custom( schedule = NULL, notifications = NULL, time_zone = NULL, - target_project_id = NULL + target_project_id = NULL, + required_resources = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -33,10 +36,11 @@ scripts_put_custom( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -56,10 +60,27 @@ scripts_put_custom( \item{time_zone}{string optional. The time zone of this script.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{required_resources}{list optional. A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} + +\item{partition_label}{string optional. The partition label used to run this object. Only applicable for jobs using Docker.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. 
+\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -107,10 +128,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -143,6 +165,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -155,6 +178,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} +\item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Replace all attributes of this Custom Script diff --git a/man/scripts_put_custom_archive.Rd b/man/scripts_put_custom_archive.Rd index 7bab7c14..d7d64d2c 100644 --- a/man/scripts_put_custom_archive.Rd +++ b/man/scripts_put_custom_archive.Rd @@ -14,6 +14,12 @@ scripts_put_custom_archive(id, status) \value{ A list containing the following elements: \item{id}{integer, The ID for the script.} +\item{fromTemplateAliases}{array, An array containing the following fields: +\itemize{ +\item id integer, The id of the Alias object. 
+\item objectId integer, The id of the object +\item alias string, The alias of the object +}} \item{name}{string, The name of the script.} \item{type}{string, The type of the script (e.g Custom)} \item{createdAt}{string, The time this script was created.} @@ -61,10 +67,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -97,6 +104,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{archived}{string, The archival status of the requested item(s).} \item{targetProjectId}{integer, Target project to which script outputs will be added.} @@ -109,6 +117,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{requiredResources}{list, A list containing the following elements: +\itemize{ +\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1000 shares. +\item memory integer, The amount of RAM to allocate for the container (in MB). +\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported. +}} +\item{partitionLabel}{string, The partition label used to run this object. Only applicable for jobs using Docker.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_custom_transfer.Rd b/man/scripts_put_custom_transfer.Rd new file mode 100644 index 00000000..0b3725cf --- /dev/null +++ b/man/scripts_put_custom_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_custom_transfer} +\alias{scripts_put_custom_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_custom_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. 
Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/scripts_put_javascript.Rd b/man/scripts_put_javascript.Rd index 360e0a6b..f2fb1d07 100644 --- a/man/scripts_put_javascript.Rd +++ b/man/scripts_put_javascript.Rd @@ -18,7 +18,8 @@ scripts_put_javascript( notifications = NULL, next_run_at = NULL, time_zone = NULL, - target_project_id = NULL + target_project_id = NULL, + running_as_id = NULL ) } \arguments{ @@ -53,10 +54,11 @@ scripts_put_javascript( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -78,6 +80,8 @@ scripts_put_javascript( \item{time_zone}{string optional. The time zone of this script.} \item{target_project_id}{integer optional. Target project to which script outputs will be added.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -129,10 +133,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -166,12 +171,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. 
}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Replace all attributes of this JavaScript Script diff --git a/man/scripts_put_javascript_archive.Rd b/man/scripts_put_javascript_archive.Rd index b7f0f930..4a8f206c 100644 --- a/man/scripts_put_javascript_archive.Rd +++ b/man/scripts_put_javascript_archive.Rd @@ -61,10 +61,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -98,12 +99,14 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} \item{source}{string, The body/text of the script.} \item{remoteHostId}{integer, The remote host ID that this script will connect to.} \item{credentialId}{integer, The credential that this script will use.} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_javascript_git.Rd b/man/scripts_put_javascript_git.Rd index 70a94ec0..41dd1d27 100644 --- a/man/scripts_put_javascript_git.Rd +++ b/man/scripts_put_javascript_git.Rd @@ -10,6 +10,7 @@ scripts_put_javascript_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ scripts_put_javascript_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/scripts_put_javascript_transfer.Rd b/man/scripts_put_javascript_transfer.Rd new file mode 100644 index 00000000..066d84c8 --- /dev/null +++ b/man/scripts_put_javascript_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_javascript_transfer} +\alias{scripts_put_javascript_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_javascript_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/scripts_put_python3.Rd b/man/scripts_put_python3.Rd index a57b10c1..dfdec70f 100644 --- a/man/scripts_put_python3.Rd +++ b/man/scripts_put_python3.Rd @@ -20,7 +20,9 @@ scripts_put_python3( required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -51,10 +53,11 @@ scripts_put_python3( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. 
-\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_put_python3( \item{cancel_timeout}{integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Replace all attributes of this Python Script diff --git a/man/scripts_put_python3_archive.Rd b/man/scripts_put_python3_archive.Rd index d9747942..1143157d 100644 --- a/man/scripts_put_python3_archive.Rd +++ b/man/scripts_put_python3_archive.Rd @@ -61,10 +61,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. 
-\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -98,6 +99,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -111,6 +113,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_python3_git.Rd b/man/scripts_put_python3_git.Rd index 3c3ffd16..77ec94b7 100644 --- a/man/scripts_put_python3_git.Rd +++ b/man/scripts_put_python3_git.Rd @@ -10,6 +10,7 @@ scripts_put_python3_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ scripts_put_python3_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/scripts_put_python3_transfer.Rd b/man/scripts_put_python3_transfer.Rd new file mode 100644 index 00000000..3a8fadaa --- /dev/null +++ b/man/scripts_put_python3_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_python3_transfer} +\alias{scripts_put_python3_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_python3_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/scripts_put_r.Rd b/man/scripts_put_r.Rd index 0c1f46e5..2e788fa1 100644 --- a/man/scripts_put_r.Rd +++ b/man/scripts_put_r.Rd @@ -20,7 +20,9 @@ scripts_put_r( required_resources = NULL, instance_type = NULL, cancel_timeout = NULL, - docker_image_tag = NULL + docker_image_tag = NULL, + partition_label = NULL, + running_as_id = NULL ) } \arguments{ @@ -51,10 +53,11 @@ scripts_put_r( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -89,6 +92,10 @@ scripts_put_r( \item{cancel_timeout}{integer optional. The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{docker_image_tag}{string optional. The tag of the docker image to pull from DockerHub.} + +\item{partition_label}{string optional. 
The partition label used to run this object.} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -140,10 +147,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -177,6 +185,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -190,6 +199,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Replace all attributes of this R Script diff --git a/man/scripts_put_r_archive.Rd b/man/scripts_put_r_archive.Rd index 8a625515..d80c9376 100644 --- a/man/scripts_put_r_archive.Rd +++ b/man/scripts_put_r_archive.Rd @@ -61,10 +61,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -98,6 +99,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. 
}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -111,6 +113,8 @@ A list containing the following elements: \item{source}{string, The body/text of the script.} \item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.} +\item{partitionLabel}{string, The partition label used to run this object. } +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_r_git.Rd b/man/scripts_put_r_git.Rd index 3617a00b..9c4d3ac2 100644 --- a/man/scripts_put_r_git.Rd +++ b/man/scripts_put_r_git.Rd @@ -10,6 +10,7 @@ scripts_put_r_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ scripts_put_r_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/scripts_put_r_transfer.Rd b/man/scripts_put_r_transfer.Rd new file mode 100644 index 00000000..3c394f7b --- /dev/null +++ b/man/scripts_put_r_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_r_transfer} +\alias{scripts_put_r_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_r_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/scripts_put_sql.Rd b/man/scripts_put_sql.Rd index fe459098..0e7a131f 100644 --- a/man/scripts_put_sql.Rd +++ b/man/scripts_put_sql.Rd @@ -19,7 +19,8 @@ scripts_put_sql( next_run_at = NULL, time_zone = NULL, target_project_id = NULL, - csv_settings = NULL + csv_settings = NULL, + running_as_id = NULL ) } \arguments{ @@ -54,10 +55,11 @@ scripts_put_sql( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list optional. A list containing the following elements: @@ -90,6 +92,8 @@ scripts_put_sql( \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} + +\item{running_as_id}{integer optional. The ID of the runner of this script.} } \value{ A list containing the following elements: @@ -141,10 +145,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -178,6 +183,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -196,6 +202,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Replace all attributes of this SQL script diff --git a/man/scripts_put_sql_archive.Rd b/man/scripts_put_sql_archive.Rd index 3327d066..ebb65f9d 100644 --- a/man/scripts_put_sql_archive.Rd +++ b/man/scripts_put_sql_archive.Rd @@ -61,10 +61,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{notifications}{list, A list containing the following elements: \itemize{ @@ -98,6 +99,7 @@ A list containing the following elements: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{hidden}{boolean, The hidden status of the item.} \item{targetProjectId}{integer, Target project to which script outputs will be added.} \item{archived}{string, The archival status of the requested item(s).} @@ -116,6 +118,7 @@ A list containing the following elements: \item filenamePrefix string, A user specified filename prefix for the output file to have. Default: null \item maxFileSize integer, The max file size, in MB, created files will be. Only available when force_multifile is true. }} +\item{runningAsId}{integer, The ID of the runner of this script.} } \description{ Update the archive status of this object diff --git a/man/scripts_put_sql_git.Rd b/man/scripts_put_sql_git.Rd index 792c45c1..3bba8618 100644 --- a/man/scripts_put_sql_git.Rd +++ b/man/scripts_put_sql_git.Rd @@ -10,6 +10,7 @@ scripts_put_sql_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ scripts_put_sql_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. 
Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/scripts_put_sql_transfer.Rd b/man/scripts_put_sql_transfer.Rd new file mode 100644 index 00000000..379accf1 --- /dev/null +++ b/man/scripts_put_sql_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{scripts_put_sql_transfer} +\alias{scripts_put_sql_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +scripts_put_sql_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/search_list.Rd b/man/search_list.Rd index 681cede1..f067a6e7 100644 --- a/man/search_list.Rd +++ b/man/search_list.Rd @@ -52,6 +52,7 @@ An array containing the following fields: \item lastRunFinish string, The last run finish time of the item, if the item is a job. \item public boolean, The flag that indicates a template is available to all users. \item lastRunException string, The exception of the item after the last run, if the item is a job. +\item autoShare boolean, The flag that indicates if a project has Auto-Share enabled. 
}} } \description{ diff --git a/man/search_list_queries.Rd b/man/search_list_queries.Rd new file mode 100644 index 00000000..6ebf1053 --- /dev/null +++ b/man/search_list_queries.Rd @@ -0,0 +1,68 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{search_list_queries} +\alias{search_list_queries} +\title{Search queries that are not hidden} +\usage{ +search_list_queries( + search_string = NULL, + database_id = NULL, + credential_id = NULL, + author_id = NULL, + archived = NULL, + state = NULL, + started_before = NULL, + started_after = NULL, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{search_string}{string optional. Space delimited search terms for searching queries by their SQL. Supports wild card characters "?" for any single character, and "*" for zero or more characters.} + +\item{database_id}{integer optional. The database ID.} + +\item{credential_id}{integer optional. The credential ID.} + +\item{author_id}{integer optional. The author of the query.} + +\item{archived}{boolean optional. The archival status of the requested item(s). Defaults to false.} + +\item{state}{array optional. The state of the last run. One or more of queued, running, succeeded, failed, and cancelled.} + +\item{started_before}{string optional. An upper bound for the start date of the last run.} + +\item{started_after}{string optional. A lower bound for the start date of the last run.} + +\item{limit}{integer optional. Number of results to return. Defaults to 10. Maximum allowed is 50.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to last_run_started_at. Must be one of: last_run_started_at.} + +\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.} +} +\value{ +An array containing the following fields: +\item{id}{integer, The query ID.} +\item{database}{integer, The database ID.} +\item{credential}{integer, The credential ID.} +\item{sql}{string, The SQL executed by the query.} +\item{authorId}{integer, The author of the query.} +\item{archived}{boolean, The archival status of the requested item(s).} +\item{createdAt}{string, } +\item{updatedAt}{string, } +\item{lastRun}{list, A list containing the following elements: +\itemize{ +\item id integer, +\item state string, The state of the run. One of queued, running, succeeded, failed, and cancelled. +\item startedAt string, The time that the run started. +\item finishedAt string, The time that the run completed. +\item error string, The error message for this run, if present. 
+}} +} +\description{ +Search queries that are not hidden +} diff --git a/man/services_get.Rd b/man/services_get.Rd index 5ae9219d..114b943f 100644 --- a/man/services_get.Rd +++ b/man/services_get.Rd @@ -39,7 +39,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -55,42 +54,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_get_deployments.Rd b/man/services_get_deployments.Rd index 7637422e..57a63801 100644 --- a/man/services_get_deployments.Rd +++ b/man/services_get_deployments.Rd @@ -21,13 +21,14 @@ A list containing the following elements: \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{displayUrl}{string, A signed URL for viewing the deployed item.} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{serviceId}{integer, The ID of owning Service} } \description{ diff --git a/man/services_list.Rd b/man/services_list.Rd index 0189290f..42d09178 100644 --- a/man/services_list.Rd +++ b/man/services_list.Rd @@ -20,7 +20,7 @@ services_list( \item{archived}{string optional. The archival status of the requested item(s).} -\item{author}{string optional. If specified, return imports from this author. It accepts a comma-separated list of author IDs.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{status}{string optional. If specified, returns Services with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'idle'.} @@ -60,31 +60,14 @@ An array containing the following fields: \item dockerImageName string, The name of the docker image to pull from DockerHub. \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. 
-\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{archived}{string, The archival status of the requested item(s).} diff --git a/man/services_list_dependencies.Rd b/man/services_list_dependencies.Rd new file mode 100644 index 00000000..e87ced62 --- /dev/null +++ b/man/services_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{services_list_dependencies} +\alias{services_list_dependencies} +\title{List dependent objects for this object} +\usage{ +services_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/services_list_deployments.Rd b/man/services_list_deployments.Rd index 77f95dae..27a1dc5c 100644 --- a/man/services_list_deployments.Rd +++ b/man/services_list_deployments.Rd @@ -35,13 +35,14 @@ An array containing the following fields: \item{dockerImageName}{string, The name of the docker image to pull from DockerHub.} \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{serviceId}{integer, The ID of owning Service} } \description{ diff --git a/man/services_list_tokens.Rd b/man/services_list_tokens.Rd index c8ee943f..38d91647 100644 --- a/man/services_list_tokens.Rd +++ b/man/services_list_tokens.Rd @@ -22,6 +22,7 @@ An array containing the following fields: \item online boolean, Whether this user is online. 
}} \item{machineToken}{boolean, If true, this token is not tied to a particular user.} +\item{expiresAt}{string, The date and time when the token expires.} \item{createdAt}{string, The date and time when the token was created.} } \description{ diff --git a/man/services_patch.Rd b/man/services_patch.Rd index 1b8b9754..1d0be1bb 100644 --- a/man/services_patch.Rd +++ b/man/services_patch.Rd @@ -17,13 +17,13 @@ services_patch( memory = NULL, cpu = NULL, credentials = NULL, - api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, - notifications = NULL + notifications = NULL, + partition_label = NULL ) } \arguments{ @@ -55,8 +55,6 @@ services_patch( \item{credentials}{array optional. A list of credential IDs to pass to the Service.} -\item{api_key_id}{integer optional. API key id of user} - \item{permission_set_id}{integer optional. The ID of the associated permission set, if any.} \item{git_repo_url}{string optional. The url for the git repo where the Service code lives.} @@ -72,6 +70,8 @@ services_patch( \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} + +\item{partition_label}{string optional. The partition label used to run this object.} } \value{ A list containing the following elements: @@ -103,7 +103,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -119,42 +118,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. 
+\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_post.Rd b/man/services_post.Rd index 31539829..cf84ae30 100644 --- a/man/services_post.Rd +++ b/man/services_post.Rd @@ -17,13 +17,13 @@ services_post( memory = NULL, cpu = NULL, credentials = NULL, - api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, notifications = NULL, + partition_label = NULL, hidden = NULL ) } @@ -56,8 +56,6 @@ services_post( \item{credentials}{array optional. A list of credential IDs to pass to the Service.} -\item{api_key_id}{integer optional. API key id of user} - \item{permission_set_id}{integer optional. The ID of the associated permission set, if any.} \item{git_repo_url}{string optional. The url for the git repo where the Service code lives.} @@ -74,6 +72,8 @@ services_post( \item failureOn boolean, If failure email notifications are on }} +\item{partition_label}{string optional. The partition label used to run this object.} + \item{hidden}{boolean optional. The hidden status of the item.} } \value{ @@ -106,7 +106,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -122,42 +121,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. 
-\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_post_clone.Rd b/man/services_post_clone.Rd index ab3faf9a..6ed2fe9a 100644 --- a/man/services_post_clone.Rd +++ b/man/services_post_clone.Rd @@ -39,7 +39,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -55,42 +54,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. 
+\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_post_deployments.Rd b/man/services_post_deployments.Rd index a2c8fa9d..00f13981 100644 --- a/man/services_post_deployments.Rd +++ b/man/services_post_deployments.Rd @@ -4,14 +4,12 @@ \alias{services_post_deployments} \title{Deploy a Service} \usage{ -services_post_deployments(service_id, deployment_id = NULL, published = NULL) +services_post_deployments(service_id, deployment_id = NULL) } \arguments{ \item{service_id}{integer required. The ID of the owning Service} \item{deployment_id}{integer optional. 
The ID for this deployment} - -\item{published}{boolean optional.} } \value{ A list containing the following elements: @@ -23,13 +21,14 @@ A list containing the following elements: \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{displayUrl}{string, A signed URL for viewing the deployed item.} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{serviceId}{integer, The ID of owning Service} } \description{ diff --git a/man/services_post_redeploy.Rd b/man/services_post_redeploy.Rd index 2d3a0403..99a58e7c 100644 --- a/man/services_post_redeploy.Rd +++ b/man/services_post_redeploy.Rd @@ -4,14 +4,12 @@ \alias{services_post_redeploy} \title{Redeploy a Service} \usage{ -services_post_redeploy(service_id, deployment_id = NULL, published = NULL) +services_post_redeploy(service_id, deployment_id = NULL) } \arguments{ \item{service_id}{integer required. The ID of the owning Service} \item{deployment_id}{integer optional. The ID for this deployment} - -\item{published}{boolean optional.} } \value{ A list containing the following elements: @@ -23,13 +21,14 @@ A list containing the following elements: \item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub (default: latest).} \item{displayUrl}{string, A signed URL for viewing the deployed item.} \item{instanceType}{string, The EC2 instance type requested for the deployment.} -\item{memory}{integer, The memory allocated to the deployment.} -\item{cpu}{integer, The cpu allocated to the deployment.} +\item{memory}{integer, The memory allocated to the deployment, in MB.} +\item{cpu}{integer, The cpu allocated to the deployment, in millicores.} \item{state}{string, The state of the deployment.} \item{stateMessage}{string, A detailed description of the state.} +\item{maxMemoryUsage}{number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB.} +\item{maxCpuUsage}{number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores.} \item{createdAt}{string, } \item{updatedAt}{string, } -\item{published}{boolean, } \item{serviceId}{integer, The ID of owning Service} } \description{ diff --git a/man/services_post_tokens.Rd b/man/services_post_tokens.Rd index cab44712..5d5678e5 100644 --- a/man/services_post_tokens.Rd +++ b/man/services_post_tokens.Rd @@ -4,7 +4,7 @@ \alias{services_post_tokens} \title{Create a new long-lived service token} \usage{ -services_post_tokens(id, name, machine_token = NULL) +services_post_tokens(id, name, machine_token = NULL, expires_in = NULL) } \arguments{ \item{id}{integer required. The ID of the service.} @@ -12,6 +12,8 @@ services_post_tokens(id, name, machine_token = NULL) \item{name}{string required. 
The name of the token.} \item{machine_token}{boolean optional. If true, create a compact token with no user information.} + +\item{expires_in}{integer optional. The number of seconds until the token should expire} } \value{ A list containing the following elements: @@ -26,6 +28,7 @@ A list containing the following elements: \item online boolean, Whether this user is online. }} \item{machineToken}{boolean, If true, this token is not tied to a particular user.} +\item{expiresAt}{string, The date and time when the token expires.} \item{createdAt}{string, The date and time when the token was created.} \item{token}{string, The value of the token. Only returned when the token is first created.} } diff --git a/man/services_put.Rd b/man/services_put.Rd index f372e323..b8d65c73 100644 --- a/man/services_put.Rd +++ b/man/services_put.Rd @@ -17,13 +17,13 @@ services_put( memory = NULL, cpu = NULL, credentials = NULL, - api_key_id = NULL, permission_set_id = NULL, git_repo_url = NULL, git_repo_ref = NULL, git_path_dir = NULL, environment_variables = NULL, - notifications = NULL + notifications = NULL, + partition_label = NULL ) } \arguments{ @@ -55,8 +55,6 @@ services_put( \item{credentials}{array optional. A list of credential IDs to pass to the Service.} -\item{api_key_id}{integer optional. API key id of user} - \item{permission_set_id}{integer optional. The ID of the associated permission set, if any.} \item{git_repo_url}{string optional. The url for the git repo where the Service code lives.} @@ -72,6 +70,8 @@ services_put( \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} + +\item{partition_label}{string optional. The partition label used to run this object.} } \value{ A list containing the following elements: @@ -103,7 +103,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -119,42 +118,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. -\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. 
-\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_put_archive.Rd b/man/services_put_archive.Rd index cf232c05..5cafea82 100644 --- a/man/services_put_archive.Rd +++ b/man/services_put_archive.Rd @@ -41,7 +41,6 @@ A list containing the following elements: \item{createdAt}{string, } \item{updatedAt}{string, } \item{credentials}{array, A list of credential IDs to pass to the Service.} -\item{apiKeyId}{integer, API key id of user} \item{permissionSetId}{integer, The ID of the associated permission set, if any.} \item{gitRepoUrl}{string, The url for the git repo where the Service code lives.} \item{gitRepoRef}{string, The git reference to use when pulling code from the repo.} @@ -57,42 +56,25 @@ A list containing the following elements: \item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). \item displayUrl string, A signed URL for viewing the deployed item. \item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. +\item memory integer, The memory allocated to the deployment, in MB. +\item cpu integer, The cpu allocated to the deployment, in millicores. \item state string, The state of the deployment. \item stateMessage string, A detailed description of the state. +\item maxMemoryUsage number, If the deployment has finished, the maximum amount of memory used during the deployment, in MB. +\item maxCpuUsage number, If the deployment has finished, the maximum amount of cpu used during the deployment, in millicores. \item createdAt string, \item updatedAt string, -\item published boolean, -\item serviceId integer, The ID of owning Service -}} -\item{previewDeployment}{list, A list containing the following elements: -\itemize{ -\item deploymentId integer, The ID for this deployment. -\item userId integer, The ID of the owner. 
-\item host string, Domain of the deployment. -\item name string, Name of the deployment. -\item dockerImageName string, The name of the docker image to pull from DockerHub. -\item dockerImageTag string, The tag of the docker image to pull from DockerHub (default: latest). -\item displayUrl string, A signed URL for viewing the deployed item. -\item instanceType string, The EC2 instance type requested for the deployment. -\item memory integer, The memory allocated to the deployment. -\item cpu integer, The cpu allocated to the deployment. -\item state string, The state of the deployment. -\item stateMessage string, A detailed description of the state. -\item createdAt string, -\item updatedAt string, -\item published boolean, \item serviceId integer, The ID of owning Service }} \item{currentUrl}{string, The URL that the service is hosted at.} -\item{previewUrl}{string, The URL that previews of the service are hosted at.} \item{environmentVariables}{list, Environment Variables to be passed into the Service.} \item{notifications}{list, A list containing the following elements: \itemize{ \item failureEmailAddresses array, Addresses to notify by e-mail when the service fails. \item failureOn boolean, If failure email notifications are on }} +\item{partitionLabel}{string, The partition label used to run this object.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} } diff --git a/man/services_put_transfer.Rd b/man/services_put_transfer.Rd new file mode 100644 index 00000000..5c62da71 --- /dev/null +++ b/man/services_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{services_put_transfer} +\alias{services_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +services_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/storage_hosts_list_dependencies.Rd b/man/storage_hosts_list_dependencies.Rd new file mode 100644 index 00000000..1c15f09f --- /dev/null +++ b/man/storage_hosts_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{storage_hosts_list_dependencies} +\alias{storage_hosts_list_dependencies} +\title{List dependent objects for this object} +\usage{ +storage_hosts_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/storage_hosts_put_transfer.Rd b/man/storage_hosts_put_transfer.Rd new file mode 100644 index 00000000..84dbdc35 --- /dev/null +++ b/man/storage_hosts_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{storage_hosts_put_transfer} +\alias{storage_hosts_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +storage_hosts_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/table_tags_delete.Rd b/man/table_tags_delete.Rd new file mode 100644 index 00000000..47aeb6f3 --- /dev/null +++ b/man/table_tags_delete.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{table_tags_delete} +\alias{table_tags_delete} +\title{Delete a Table Tag} +\usage{ +table_tags_delete(id) +} +\arguments{ +\item{id}{integer required.} +} +\value{ +An empty HTTP response +} +\description{ +Delete a Table Tag +} diff --git a/man/table_tags_get.Rd b/man/table_tags_get.Rd new file mode 100644 index 00000000..8ab6dbcc --- /dev/null +++ b/man/table_tags_get.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{table_tags_get} +\alias{table_tags_get} +\title{Get a Table Tag} +\usage{ +table_tags_get(id) +} +\arguments{ +\item{id}{integer required.} +} +\value{ +A list containing the following elements: +\item{id}{integer, Table Tag ID} +\item{name}{string, Table Tag Name} +\item{createdAt}{string, The date the tag was created.} +\item{updatedAt}{string, The date the tag was recently updated on.} +\item{tableCount}{integer, The total number of tables associated with the tag.} +\item{user}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +} +\description{ +Get a Table Tag +} diff --git a/man/table_tags_list.Rd b/man/table_tags_list.Rd new file mode 100644 index 00000000..bc2a5df1 --- /dev/null +++ b/man/table_tags_list.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{table_tags_list} +\alias{table_tags_list} +\title{List Table Tags} +\usage{ +table_tags_list( + name = NULL, + limit = NULL, + page_num = NULL, + order = NULL, + order_dir = NULL +) +} +\arguments{ +\item{name}{string optional. Name of the tag. If it is provided, the results will be filtered by name} + +\item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} + +\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} + +\item{order}{string optional. The field on which to order the result set. Defaults to name. Must be one of: name, user, table_count.} + +\item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending), defaulting to asc.}
+}
+\value{
+An array containing the following fields:
+\item{id}{integer, Table Tag ID}
+\item{name}{string, Table Tag Name}
+\item{tableCount}{integer, The total number of tables associated with the tag.}
+\item{user}{list, A list containing the following elements:
+\itemize{
+\item id integer, The ID of this user.
+\item name string, This user's name.
+\item username string, This user's username.
+\item initials string, This user's initials.
+\item online boolean, Whether this user is online.
+}}
+}
+\description{
+List Table Tags
+}
diff --git a/man/table_tags_post.Rd b/man/table_tags_post.Rd
new file mode 100644
index 00000000..87cf3403
--- /dev/null
+++ b/man/table_tags_post.Rd
@@ -0,0 +1,30 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/generated_client.R
+\name{table_tags_post}
+\alias{table_tags_post}
+\title{Create a Table Tag}
+\usage{
+table_tags_post(name)
+}
+\arguments{
+\item{name}{string required. Table Tag Name}
+}
+\value{
+A list containing the following elements:
+\item{id}{integer, Table Tag ID}
+\item{name}{string, Table Tag Name}
+\item{createdAt}{string, The date the tag was created.}
+\item{updatedAt}{string, The date the tag was last updated.}
+\item{tableCount}{integer, The total number of tables associated with the tag.}
+\item{user}{list, A list containing the following elements:
+\itemize{
+\item id integer, The ID of this user.
+\item name string, This user's name.
+\item username string, This user's username.
+\item initials string, This user's initials.
+\item online boolean, Whether this user is online.
+}}
+}
+\description{
+Create a Table Tag
+}
diff --git a/man/tables_delete_tags.Rd b/man/tables_delete_tags.Rd
new file mode 100644
index 00000000..4d4bce1f
--- /dev/null
+++ b/man/tables_delete_tags.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/generated_client.R
+\name{tables_delete_tags}
+\alias{tables_delete_tags}
+\title{Remove a tag from a table}
+\usage{
+tables_delete_tags(id, table_tag_id)
+}
+\arguments{
+\item{id}{integer required. The ID of the table.}
+
+\item{table_tag_id}{integer required. The ID of the tag.}
+}
+\value{
+An empty HTTP response
+}
+\description{
+Remove a tag from a table
+}
diff --git a/man/tables_get.Rd b/man/tables_get.Rd
index 3d7e6993..903179d5 100644
--- a/man/tables_get.Rd
+++ b/man/tables_get.Rd
@@ -39,6 +39,11 @@ A list containing the following elements:
 }}
 \item{primaryKeys}{array, The primary keys for this table.}
 \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.}
+\item{tableTags}{array, An array containing the following fields:
+\itemize{
+\item id integer, Table Tag ID
+\item name string, Table Tag Name
+}}
 \item{ontologyMapping}{list, The ontology-key to column-name mapping. See /ontology for the list of valid ontology keys.}
 \item{columns}{array, An array containing the following fields:
 \itemize{
@@ -51,8 +56,8 @@ A list containing the following elements:
 \item order integer, Relative position of the column in the table.
 \item minValue string, Smallest value in the column.
 \item maxValue string, Largest value in the column.
-\item avgValue number, Average value of the column, where applicable.
-\item stddev number, Stddev of the column, where applicable.
+\item avgValue number, This parameter is deprecated.
+\item stddev number, This parameter is deprecated.
\item valueDistributionPercent object, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values. \item coverageCount integer, Number of non-null values in the column. \item nullCount integer, Number of null values in the column. @@ -60,7 +65,7 @@ A list containing the following elements: \item useableAsIndependentVariable boolean, Whether the column may be used as an independent variable to train a model. \item useableAsPrimaryKey boolean, Whether the column may be used as an primary key to identify table rows. \item valueDistribution object, An object mapping distinct values in the column to the number of times they appear in the column -\item distinctCount integer, Number of distinct values in the column. +\item distinctCount integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value. }} \item{joins}{array, An array containing the following fields: \itemize{ diff --git a/man/tables_get_enhancements_cass_ncoa.Rd b/man/tables_get_enhancements_cass_ncoa.Rd index 820d31a7..b704cda3 100644 --- a/man/tables_get_enhancements_cass_ncoa.Rd +++ b/man/tables_get_enhancements_cass_ncoa.Rd @@ -21,6 +21,7 @@ A list containing the following elements: \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +\item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} } \description{ View the status of a CASS / NCOA table enhancement diff --git a/man/tables_list.Rd b/man/tables_list.Rd index fa457ecd..c0cce6a5 100644 --- a/man/tables_list.Rd +++ b/man/tables_list.Rd @@ -9,6 +9,8 @@ tables_list( schema = NULL, name = NULL, search = NULL, + table_tag_ids = NULL, + credential_id = NULL, limit = NULL, page_num = NULL, order = NULL, @@ -24,11 +26,15 @@ tables_list( \item{search}{string optional. If specified, will be used to filter the tables returned. Will search across schema and name (in the full form schema.name) and will return any full name containing the search string.} +\item{table_tag_ids}{array optional. If specified, will be used to filter the tables returned. Will search across Table Tags and will return any tables that have one of the matching Table Tags.} + +\item{credential_id}{integer optional. If specified, will be used instead of the default credential to filter the tables returned.} + \item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} \item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.} -\item{order}{string optional. The field on which to order the result set. Defaults to schema. Must be one of: schema, name, search.} +\item{order}{string optional. The field on which to order the result set. Defaults to schema. Must be one of: schema, name, search, table_tag_ids, credential_id.} \item{order_dir}{string optional. 
Direction in which to sort, either asc (ascending) or desc (descending) defaulting to asc.} } @@ -58,6 +64,11 @@ An array containing the following fields: \item finishedAt string, The time that the run completed. \item error string, The error message for this run, if present. }} +\item{tableTags}{array, An array containing the following fields: +\itemize{ +\item id integer, Table Tag ID +\item name string, Table Tag Name +}} } \description{ List tables diff --git a/man/tables_list_columns.Rd b/man/tables_list_columns.Rd index ff2ec437..22366cf0 100644 --- a/man/tables_list_columns.Rd +++ b/man/tables_list_columns.Rd @@ -37,8 +37,8 @@ An array containing the following fields: \item{order}{integer, Relative position of the column in the table.} \item{minValue}{string, Smallest value in the column.} \item{maxValue}{string, Largest value in the column.} -\item{avgValue}{number, Average value of the column, where applicable.} -\item{stddev}{number, Stddev of the column, where applicable.} +\item{avgValue}{number, This parameter is deprecated.} +\item{stddev}{number, This parameter is deprecated.} \item{valueDistributionPercent}{list, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values.} \item{coverageCount}{integer, Number of non-null values in the column.} \item{nullCount}{integer, Number of null values in the column.} @@ -46,7 +46,7 @@ An array containing the following fields: \item{useableAsIndependentVariable}{boolean, Whether the column may be used as an independent variable to train a model.} \item{useableAsPrimaryKey}{boolean, Whether the column may be used as an primary key to identify table rows.} \item{valueDistribution}{list, An object mapping distinct values in the column to the number of times they appear in the column} -\item{distinctCount}{integer, Number of distinct values in the column.} +\item{distinctCount}{integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value.} } \description{ List columns in the specified table diff --git a/man/tables_patch.Rd b/man/tables_patch.Rd index 3c7d7f5d..084d6fb5 100644 --- a/man/tables_patch.Rd +++ b/man/tables_patch.Rd @@ -53,6 +53,11 @@ A list containing the following elements: }} \item{primaryKeys}{array, The primary keys for this table.} \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.} +\item{tableTags}{array, An array containing the following fields: +\itemize{ +\item id integer, Table Tag ID +\item name string, Table Tag Name +}} \item{ontologyMapping}{list, The ontology-key to column-name mapping. See /ontology for the list of valid ontology keys.} } \description{ diff --git a/man/tables_post_enhancements_cass_ncoa.Rd b/man/tables_post_enhancements_cass_ncoa.Rd index 1fcbfab5..16cb489d 100644 --- a/man/tables_post_enhancements_cass_ncoa.Rd +++ b/man/tables_post_enhancements_cass_ncoa.Rd @@ -8,7 +8,8 @@ tables_post_enhancements_cass_ncoa( source_table_id, perform_ncoa = NULL, ncoa_credential_id = NULL, - output_level = NULL + output_level = NULL, + chunk_size = NULL ) } \arguments{ @@ -19,6 +20,8 @@ tables_post_enhancements_cass_ncoa( \item{ncoa_credential_id}{integer optional. Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{output_level}{string optional. 
The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} + +\item{chunk_size}{integer optional. The maximum number of records processed at a time. Note that this parameter is not available to all users.} } \value{ A list containing the following elements: @@ -30,6 +33,7 @@ A list containing the following elements: \item{performNcoa}{boolean, Whether to update addresses for records matching the National Change of Address (NCOA) database.} \item{ncoaCredentialId}{integer, Credential to use when performing NCOA updates. Required if 'performNcoa' is true.} \item{outputLevel}{string, The set of fields persisted by a CASS or NCOA enhancement.For CASS enhancements, one of 'cass' or 'all.'For NCOA enhancements, one of 'cass', 'ncoa' , 'coalesced' or 'all'.By default, all fields will be returned.} +\item{chunkSize}{integer, The maximum number of records processed at a time. Note that this parameter is not available to all users.} } \description{ Standardize addresses in a table diff --git a/man/tables_post_refresh.Rd b/man/tables_post_refresh.Rd index c077d0e0..d402baf3 100644 --- a/man/tables_post_refresh.Rd +++ b/man/tables_post_refresh.Rd @@ -39,6 +39,11 @@ A list containing the following elements: }} \item{primaryKeys}{array, The primary keys for this table.} \item{lastModifiedKeys}{array, The columns indicating an entry's modification status for this table.} +\item{tableTags}{array, An array containing the following fields: +\itemize{ +\item id integer, Table Tag ID +\item name string, Table Tag Name +}} \item{ontologyMapping}{list, The ontology-key to column-name mapping. See /ontology for the list of valid ontology keys.} \item{columns}{array, An array containing the following fields: \itemize{ @@ -51,8 +56,8 @@ A list containing the following elements: \item order integer, Relative position of the column in the table. \item minValue string, Smallest value in the column. \item maxValue string, Largest value in the column. -\item avgValue number, Average value of the column, where applicable. -\item stddev number, Stddev of the column, where applicable. +\item avgValue number, This parameter is deprecated. +\item stddev number, This parameter is deprecated. \item valueDistributionPercent object, A mapping between each value in the column and the percentage of rows with that value.Only present for tables with fewer than approximately 25,000,000 rows and for columns with fewer than twenty distinct values. \item coverageCount integer, Number of non-null values in the column. \item nullCount integer, Number of null values in the column. @@ -60,7 +65,7 @@ A list containing the following elements: \item useableAsIndependentVariable boolean, Whether the column may be used as an independent variable to train a model. \item useableAsPrimaryKey boolean, Whether the column may be used as an primary key to identify table rows. \item valueDistribution object, An object mapping distinct values in the column to the number of times they appear in the column -\item distinctCount integer, Number of distinct values in the column. +\item distinctCount integer, Number of distinct values in the column. NULL values are counted and treated as a single distinct value. 
}} \item{joins}{array, An array containing the following fields: \itemize{ diff --git a/man/tables_put_tags.Rd b/man/tables_put_tags.Rd new file mode 100644 index 00000000..6a52f311 --- /dev/null +++ b/man/tables_put_tags.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{tables_put_tags} +\alias{tables_put_tags} +\title{Add a tag to a table} +\usage{ +tables_put_tags(id, table_tag_id) +} +\arguments{ +\item{id}{integer required. The ID of the table.} + +\item{table_tag_id}{integer required. The ID of the tag.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of the table.} +\item{tableTagId}{integer, The ID of the tag.} +} +\description{ +Add a tag to a table +} diff --git a/man/templates_get_reports.Rd b/man/templates_get_reports.Rd index 2a42c67c..60e7c688 100644 --- a/man/templates_get_reports.Rd +++ b/man/templates_get_reports.Rd @@ -18,6 +18,7 @@ A list containing the following elements: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. @@ -26,7 +27,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{authCodeUrl}{string, A URL to the template's stored code body.} \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} \item{hidden}{boolean, The hidden status of the item.} diff --git a/man/templates_get_scripts.Rd b/man/templates_get_scripts.Rd index 2d959f03..7243ae37 100644 --- a/man/templates_get_scripts.Rd +++ b/man/templates_get_scripts.Rd @@ -16,6 +16,17 @@ A list containing the following elements: \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} +\item{params}{array, An array containing the following fields: +\itemize{ +\item name string, The variable's name as used within your code. +\item label string, The label to present to users when asking them for the value. +\item description string, A short sentence or fragment describing this parameter to the end user. +\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +\item required boolean, Whether this param is required. +\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. 
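The regenerated tables_list() documentation above adds table-tag and credential filtering, and the new tables_put_tags() endpoint attaches a tag to a table. A minimal usage sketch, assuming an API key is already configured for the civis client and that the table and tag IDs below are hypothetical placeholders:

    library(civis)
    # List up to 10 tables carrying a given Table Tag (IDs are placeholders)
    tagged <- tables_list(table_tag_ids = c(123), limit = 10)
    # Attach the same tag to another table
    tables_put_tags(id = 456, table_tag_id = 123)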
Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +}} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -26,6 +37,15 @@ A list containing the following elements: \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Get a Script Template diff --git a/man/templates_list_reports.Rd b/man/templates_list_reports.Rd index 95a5c372..5d599572 100644 --- a/man/templates_list_reports.Rd +++ b/man/templates_list_reports.Rd @@ -6,6 +6,7 @@ \usage{ templates_list_reports( hidden = NULL, + author = NULL, category = NULL, limit = NULL, page_num = NULL, @@ -16,6 +17,8 @@ templates_list_reports( \arguments{ \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{category}{string optional. A category to filter results by, one of: dataset-viz} \item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} @@ -35,6 +38,7 @@ An array containing the following fields: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. @@ -43,7 +47,6 @@ An array containing the following fields: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} } \description{ List Report Templates diff --git a/man/templates_list_reports_dependencies.Rd b/man/templates_list_reports_dependencies.Rd new file mode 100644 index 00000000..41224214 --- /dev/null +++ b/man/templates_list_reports_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{templates_list_reports_dependencies} +\alias{templates_list_reports_dependencies} +\title{List dependent objects for this object} +\usage{ +templates_list_reports_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/templates_list_scripts.Rd b/man/templates_list_scripts.Rd index e1aef396..be698125 100644 --- a/man/templates_list_scripts.Rd +++ b/man/templates_list_scripts.Rd @@ -6,6 +6,7 @@ \usage{ templates_list_scripts( hidden = NULL, + author = NULL, category = NULL, limit = NULL, page_num = NULL, @@ -16,6 +17,8 @@ templates_list_scripts( \arguments{ \item{hidden}{boolean optional. If specified to be true, returns hidden items. Defaults to false, returning non-hidden items.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} + \item{category}{string optional. A category to filter results by, one of: import, export, enhancement, model, and script} \item{limit}{integer optional. Number of results to return. Defaults to 50. Maximum allowed is 1000.} @@ -40,6 +43,14 @@ An array containing the following fields: \item{uiReportId}{integer, The id of the report that this template uses.} \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} } \description{ List Script Templates diff --git a/man/templates_list_scripts_dependencies.Rd b/man/templates_list_scripts_dependencies.Rd new file mode 100644 index 00000000..7fa6639c --- /dev/null +++ b/man/templates_list_scripts_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{templates_list_scripts_dependencies} +\alias{templates_list_scripts_dependencies} +\title{List dependent objects for this object} +\usage{ +templates_list_scripts_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/templates_patch_reports.Rd b/man/templates_patch_reports.Rd index 598ab5dc..b9592286 100644 --- a/man/templates_patch_reports.Rd +++ b/man/templates_patch_reports.Rd @@ -35,6 +35,7 @@ A list containing the following elements: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. @@ -43,7 +44,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{authCodeUrl}{string, A URL to the template's stored code body.} \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} \item{hidden}{boolean, The hidden status of the item.} diff --git a/man/templates_patch_scripts.Rd b/man/templates_patch_scripts.Rd index 580e95be..2b8ed590 100644 --- a/man/templates_patch_scripts.Rd +++ b/man/templates_patch_scripts.Rd @@ -30,6 +30,17 @@ A list containing the following elements: \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} +\item{params}{array, An array containing the following fields: +\itemize{ +\item name string, The variable's name as used within your code. +\item label string, The label to present to users when asking them for the value. +\item description string, A short sentence or fragment describing this parameter to the end user. +\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +\item required boolean, Whether this param is required. +\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. 
Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +}} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -40,6 +51,15 @@ A list containing the following elements: \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Update some attributes of this Script Template diff --git a/man/templates_post_reports.Rd b/man/templates_post_reports.Rd index ce51452c..d58eff5b 100644 --- a/man/templates_post_reports.Rd +++ b/man/templates_post_reports.Rd @@ -35,6 +35,7 @@ A list containing the following elements: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. @@ -43,7 +44,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{authCodeUrl}{string, A URL to the template's stored code body.} \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} \item{hidden}{boolean, The hidden status of the item.} diff --git a/man/templates_post_reports_review.Rd b/man/templates_post_reports_review.Rd index 042665dd..84951bb3 100644 --- a/man/templates_post_reports_review.Rd +++ b/man/templates_post_reports_review.Rd @@ -20,6 +20,7 @@ A list containing the following elements: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. @@ -28,7 +29,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. 
}} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{authCodeUrl}{string, A URL to the template's stored code body.} \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} \item{hidden}{boolean, The hidden status of the item.} diff --git a/man/templates_post_scripts.Rd b/man/templates_post_scripts.Rd index 021d5a29..bdf782b8 100644 --- a/man/templates_post_scripts.Rd +++ b/man/templates_post_scripts.Rd @@ -33,6 +33,17 @@ A list containing the following elements: \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} +\item{params}{array, An array containing the following fields: +\itemize{ +\item name string, The variable's name as used within your code. +\item label string, The label to present to users when asking them for the value. +\item description string, A short sentence or fragment describing this parameter to the end user. +\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +\item required boolean, Whether this param is required. +\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +}} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -43,6 +54,15 @@ A list containing the following elements: \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} } \description{ Create a Script Template diff --git a/man/templates_post_scripts_review.Rd b/man/templates_post_scripts_review.Rd index fe1ecd40..66968f93 100644 --- a/man/templates_post_scripts_review.Rd +++ b/man/templates_post_scripts_review.Rd @@ -18,6 +18,17 @@ A list containing the following elements: \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} +\item{params}{array, An array containing the following fields: +\itemize{ +\item name string, The variable's name as used within your code. +\item label string, The label to present to users when asking them for the value. +\item description string, A short sentence or fragment describing this parameter to the end user. +\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +\item required boolean, Whether this param is required. +\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +}} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -28,6 +39,15 @@ A list containing the following elements: \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Review a template for security vulnerability and correctness (admin-only) diff --git a/man/templates_put_reports.Rd b/man/templates_put_reports.Rd index 2cd5436f..b1b5b3f4 100644 --- a/man/templates_put_reports.Rd +++ b/man/templates_put_reports.Rd @@ -35,6 +35,7 @@ A list containing the following elements: \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{archived}{boolean, Whether the template has been archived.} +\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. 
@@ -43,7 +44,6 @@ A list containing the following elements: \item initials string, This user's initials. \item online boolean, Whether this user is online. }} -\item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{authCodeUrl}{string, A URL to the template's stored code body.} \item{provideAPIKey}{boolean, Whether reports based on this template request an API Key from the report viewer.} \item{hidden}{boolean, The hidden status of the item.} diff --git a/man/templates_put_reports_transfer.Rd b/man/templates_put_reports_transfer.Rd new file mode 100644 index 00000000..5c64bfcc --- /dev/null +++ b/man/templates_put_reports_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{templates_put_reports_transfer} +\alias{templates_put_reports_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +templates_put_reports_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/templates_put_scripts.Rd b/man/templates_put_scripts.Rd index 51f1908c..d12e13a5 100644 --- a/man/templates_put_scripts.Rd +++ b/man/templates_put_scripts.Rd @@ -30,6 +30,17 @@ A list containing the following elements: \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} +\item{params}{array, An array containing the following fields: +\itemize{ +\item name string, The variable's name as used within your code. +\item label string, The label to present to users when asking them for the value. +\item description string, A short sentence or fragment describing this parameter to the end user. +\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom +\item required boolean, Whether this param is required. +\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. +\item default string, If an argument for this parameter is not defined, it will use this default value. 
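The new templates_put_reports_transfer() endpoint documented above hands a report template to another user. A short sketch, assuming the template and target-user IDs are hypothetical placeholders:

    # Transfer ownership and grant manage permission on all dependencies
    templates_put_reports_transfer(
      id = 789,                    # placeholder template ID
      user_id = 42,                # placeholder target user ID
      include_dependencies = TRUE,
      send_email = TRUE            # notify the new owner by email
    )

templates_put_scripts_transfer(), documented below, takes the same arguments for script templates.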
Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. +\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` +}} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} @@ -40,6 +51,15 @@ A list containing the following elements: \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} +\item{author}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of this user. +\item name string, This user's name. +\item username string, This user's username. +\item initials string, This user's initials. +\item online boolean, Whether this user is online. +}} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Replace all attributes of this Script Template diff --git a/man/templates_put_scripts_transfer.Rd b/man/templates_put_scripts_transfer.Rd new file mode 100644 index 00000000..52aa77e7 --- /dev/null +++ b/man/templates_put_scripts_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{templates_put_scripts_transfer} +\alias{templates_put_scripts_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +templates_put_scripts_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} diff --git a/man/users_delete_2fa.Rd b/man/users_delete_2fa.Rd new file mode 100644 index 00000000..6cccef93 --- /dev/null +++ b/man/users_delete_2fa.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_delete_2fa} +\alias{users_delete_2fa} +\title{Wipes the user's current 2FA settings so that they must reset them upon next login} +\usage{ +users_delete_2fa(id) +} +\arguments{ +\item{id}{integer required. The ID of this user.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this user.} +\item{user}{string, The username of this user.} +\item{name}{string, The name of this user.} +\item{email}{string, The email of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} +\item{primaryGroupId}{integer, The ID of the primary group of this user.} +\item{groups}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this group. +\item name string, The name of this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. +}} +\item{city}{string, The city of this user.} +\item{state}{string, The state of this user.} +\item{timeZone}{string, The time zone of this user.} +\item{initials}{string, The initials of this user.} +\item{department}{string, The department of this user.} +\item{title}{string, The title of this user.} +\item{githubUsername}{string, The GitHub username of this user.} +\item{prefersSmsOtp}{boolean, The preference for phone authorization of this user} +\item{vpnEnabled}{boolean, The availability of vpn for this user.} +\item{ssoDisabled}{boolean, The availability of SSO for this user.} +\item{otpRequiredForLogin}{boolean, The two factor authentication requirement for this user.} +\item{exemptFromOrgSmsOtpDisabled}{boolean, Whether the user has SMS OTP enabled on an individual level. This field does not matter if the org does not have SMS OTP disabled.} +\item{smsOtpAllowed}{boolean, Whether the user is allowed to receive two factor authentication codes via SMS.} +\item{robot}{boolean, Whether the user is a robot.} +\item{phone}{string, The phone number of this user.} +\item{organizationSlug}{string, The slug of the organization the user belongs to.} +\item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} +\item{organizationLoginType}{string, The user's organization's login type.} +\item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +\item{createdAt}{string, The date and time when the user was created.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +\item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} +} +\description{ +Wipes the user's current 2FA settings so that they must reset them upon next login +} diff --git a/man/users_delete_me_superadmin.Rd b/man/users_delete_me_superadmin.Rd new file mode 100644 index 00000000..9d58762e --- /dev/null +++ b/man/users_delete_me_superadmin.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_delete_me_superadmin} +\alias{users_delete_me_superadmin} +\title{Disables Superadmin Mode for the current user} +\usage{ +users_delete_me_superadmin() +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this user.} +\item{name}{string, This user's name.} +\item{email}{string, This user's email address.} +\item{username}{string, This user's username.} +\item{initials}{string, This user's initials.} +\item{lastCheckedAnnouncements}{string, The date and time at which the user last checked their announcements.} +\item{featureFlags}{list, The feature flag settings for this user.} +\item{roles}{array, The roles this user has, listed by slug.} +\item{preferences}{list, This user's preferences.} +\item{customBranding}{string, The branding of Platform for this user.} +\item{primaryGroupId}{integer, The ID of the primary group of this user.} +\item{groups}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this group. +\item name string, The name of this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. +}} +\item{organizationName}{string, The name of the organization the user belongs to.} +\item{organizationSlug}{string, The slug of the organization the user belongs to.} +\item{organizationDefaultThemeId}{integer, The ID of the organizations's default theme.} +\item{createdAt}{string, The date and time when the user was created.} +\item{signInCount}{integer, The number of times the user has signed in.} +\item{assumingRole}{boolean, Whether the user is assuming a role or not.} +\item{assumingAdmin}{boolean, Whether the user is assuming admin.} +\item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +\item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. 
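users_delete_2fa() above wipes a user's two-factor settings so they must re-enroll at next login. A minimal sketch, assuming admin privileges and a placeholder user ID:

    # Force a 2FA reset for one user
    users_delete_2fa(id = 1001)   # placeholder user ID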
The user is not in superadmin mode when set to null.} +\item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +} +\description{ +Disables Superadmin Mode for the current user +} diff --git a/man/users_delete_sessions.Rd b/man/users_delete_sessions.Rd new file mode 100644 index 00000000..b18d1131 --- /dev/null +++ b/man/users_delete_sessions.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_delete_sessions} +\alias{users_delete_sessions} +\title{Terminate all of the user's active sessions (must be a team or org admin)} +\usage{ +users_delete_sessions(id) +} +\arguments{ +\item{id}{integer required. The ID of this user.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this user.} +\item{user}{string, The username of this user.} +\item{name}{string, The name of this user.} +\item{email}{string, The email of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} +\item{primaryGroupId}{integer, The ID of the primary group of this user.} +\item{groups}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this group. +\item name string, The name of this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. +}} +\item{city}{string, The city of this user.} +\item{state}{string, The state of this user.} +\item{timeZone}{string, The time zone of this user.} +\item{initials}{string, The initials of this user.} +\item{department}{string, The department of this user.} +\item{title}{string, The title of this user.} +\item{githubUsername}{string, The GitHub username of this user.} +\item{prefersSmsOtp}{boolean, The preference for phone authorization of this user} +\item{vpnEnabled}{boolean, The availability of vpn for this user.} +\item{ssoDisabled}{boolean, The availability of SSO for this user.} +\item{otpRequiredForLogin}{boolean, The two factor authentication requirement for this user.} +\item{exemptFromOrgSmsOtpDisabled}{boolean, Whether the user has SMS OTP enabled on an individual level. This field does not matter if the org does not have SMS OTP disabled.} +\item{smsOtpAllowed}{boolean, Whether the user is allowed to receive two factor authentication codes via SMS.} +\item{robot}{boolean, Whether the user is a robot.} +\item{phone}{string, The phone number of this user.} +\item{organizationSlug}{string, The slug of the organization the user belongs to.} +\item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} +\item{organizationLoginType}{string, The user's organization's login type.} +\item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +\item{createdAt}{string, The date and time when the user was created.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +\item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} +} +\description{ +Terminate all of the user's active sessions (must be a team or org admin) +} diff --git a/man/users_get.Rd b/man/users_get.Rd index bfe837aa..6f09b312 100644 --- a/man/users_get.Rd +++ b/man/users_get.Rd @@ -15,13 +15,15 @@ A list containing the following elements: \item{user}{string, The username of this user.} \item{name}{string, The name of this user.} \item{email}{string, The email of this user.} -\item{active}{boolean, The account status of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} \item{primaryGroupId}{integer, The ID of the primary group of this user.} \item{groups}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. }} \item{city}{string, The city of this user.} \item{state}{string, The state of this user.} @@ -42,6 +44,15 @@ A list containing the following elements: \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} \item{organizationLoginType}{string, The user's organization's login type.} \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{createdAt}{string, The date and time when the user was created.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +\item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} } \description{ Show info about a user diff --git a/man/users_get_me_themes.Rd b/man/users_get_me_themes.Rd new file mode 100644 index 00000000..1425e6f7 --- /dev/null +++ b/man/users_get_me_themes.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_get_me_themes} +\alias{users_get_me_themes} +\title{Show a theme} +\usage{ +users_get_me_themes(id) +} +\arguments{ +\item{id}{integer required. 
The ID of this theme.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this theme.} +\item{name}{string, The name of this theme.} +\item{organizationIds}{array, List of organization ID's allowed to use this theme.} +\item{settings}{string, The theme configuration object.} +\item{logoFile}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID of the logo image file. +\item downloadUrl string, The URL of the logo image file. +}} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +Show a theme +} diff --git a/man/users_list.Rd b/man/users_list.Rd index 68711db9..b3234b13 100644 --- a/man/users_list.Rd +++ b/man/users_list.Rd @@ -9,6 +9,7 @@ users_list( account_status = NULL, query = NULL, group_id = NULL, + group_ids = NULL, organization_id = NULL, exclude_groups = NULL, limit = NULL, @@ -20,13 +21,15 @@ users_list( \arguments{ \item{feature_flag}{string optional. Return users that have a feature flag enabled.} -\item{account_status}{string optional. The account status by which to filter users. May be one of "active", "inactive", or "all".} +\item{account_status}{string optional. The account status by which to filter users. May be one of "active", "inactive", or "all". Defaults to active.} -\item{query}{string optional. Return users who match the given query, based on name, user, and email.} +\item{query}{string optional. Return users who match the given query, based on name, user, email, and id.} -\item{group_id}{integer optional. The ID of the group by which to filter users. Cannot be present if organization_id is.} +\item{group_id}{integer optional. The ID of the group by which to filter users. Cannot be present if group_ids is.} -\item{organization_id}{integer optional. The ID of the organization by which to filter users. Cannot be present if group_id is.} +\item{group_ids}{array optional. The IDs of the groups by which to filter users. Cannot be present if group_id is.} + +\item{organization_id}{integer optional. The ID of the organization by which to filter users.} \item{exclude_groups}{boolean optional. Whether or to exclude users' groups. Default: false.} @@ -44,16 +47,23 @@ An array containing the following fields: \item{user}{string, The username of this user.} \item{name}{string, The name of this user.} \item{email}{string, The email of this user.} -\item{active}{boolean, The account status of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} \item{primaryGroupId}{integer, The ID of the primary group of this user.} \item{groups}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. 
}} \item{createdAt}{string, The date and time when the user was created.} \item{currentSignInAt}{string, The date and time when the user's current session began.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} } \description{ List users diff --git a/man/users_list_me.Rd b/man/users_list_me.Rd index 292e2472..048ff0b8 100644 --- a/man/users_list_me.Rd +++ b/man/users_list_me.Rd @@ -23,7 +23,9 @@ A list containing the following elements: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. }} \item{organizationName}{string, The name of the organization the user belongs to.} \item{organizationSlug}{string, The slug of the organization the user belongs to.} @@ -33,6 +35,10 @@ A list containing the following elements: \item{assumingRole}{boolean, Whether the user is assuming a role or not.} \item{assumingAdmin}{boolean, Whether the user is assuming admin.} \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +\item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. The user is not in superadmin mode when set to null.} +\item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} } \description{ Show info about the logged-in user diff --git a/man/users_list_me_themes.Rd b/man/users_list_me_themes.Rd new file mode 100644 index 00000000..158fc246 --- /dev/null +++ b/man/users_list_me_themes.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_list_me_themes} +\alias{users_list_me_themes} +\title{List themes} +\usage{ +users_list_me_themes() +} +\value{ +An array containing the following fields: +\item{id}{integer, The ID of this theme.} +\item{name}{string, The name of this theme.} +\item{createdAt}{string, } +\item{updatedAt}{string, } +} +\description{ +List themes +} diff --git a/man/users_list_me_ui.Rd b/man/users_list_me_ui.Rd index 702e47c4..493450c0 100644 --- a/man/users_list_me_ui.Rd +++ b/man/users_list_me_ui.Rd @@ -15,8 +15,8 @@ A list containing the following elements: \itemize{ \item vendor boolean, This attribute is deprecated \item media boolean, True if user has access to the Media Optimizer job type. -\item mainApp string, The slug for the main app for an app-only user account. -\item appCount integer, Number of apps this user has access to. +\item mainApp string, This attribute is deprecated +\item appCount integer, This attribute is deprecated \item reportsOnly boolean, True if user is a reports-only user. \item reportsCreator boolean, True if this user is allowed to create HTML reports. 
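users_list() above gains a group_ids filter (mutually exclusive with group_id) and documents the default account_status of active. A minimal sketch with placeholder group IDs:

    # List active users belonging to either of two groups
    members <- users_list(group_ids = c(10, 20), account_status = "active", limit = 100)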
}} diff --git a/man/users_patch.Rd b/man/users_patch.Rd index 67250a78..95a80342 100644 --- a/man/users_patch.Rd +++ b/man/users_patch.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{users_patch} \alias{users_patch} -\title{Update info about a user (must be an admin or client user admin)} +\title{Update info about a user (must be a team or org admin)} \usage{ users_patch( id, @@ -24,7 +24,8 @@ users_patch( exempt_from_org_sms_otp_disabled = NULL, robot = NULL, phone = NULL, - password = NULL + password = NULL, + account_status = NULL ) } \arguments{ @@ -34,7 +35,7 @@ users_patch( \item{email}{string optional. The email of this user.} -\item{active}{boolean optional. The account status of this user.} +\item{active}{boolean optional. Whether this user account is active or deactivated.} \item{primary_group_id}{integer optional. The ID of the primary group of this user.} @@ -67,6 +68,8 @@ users_patch( \item{phone}{string optional. The phone number of this user.} \item{password}{string optional. The password of this user.} + +\item{account_status}{string optional. Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} } \value{ A list containing the following elements: @@ -74,13 +77,15 @@ A list containing the following elements: \item{user}{string, The username of this user.} \item{name}{string, The name of this user.} \item{email}{string, The email of this user.} -\item{active}{boolean, The account status of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} \item{primaryGroupId}{integer, The ID of the primary group of this user.} \item{groups}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. }} \item{city}{string, The city of this user.} \item{state}{string, The state of this user.} @@ -101,7 +106,16 @@ A list containing the following elements: \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} \item{organizationLoginType}{string, The user's organization's login type.} \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} +\item{createdAt}{string, The date and time when the user was created.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +\item{accountStatus}{string, Account status of this user. 
One of: "Active", "Deactivated", "Suspended", "Unsuspended"} } \description{ -Update info about a user (must be an admin or client user admin) +Update info about a user (must be a team or org admin) } diff --git a/man/users_patch_me.Rd b/man/users_patch_me.Rd index 018d5ae5..b518b45d 100644 --- a/man/users_patch_me.Rd +++ b/man/users_patch_me.Rd @@ -9,13 +9,13 @@ users_patch_me(preferences = NULL, last_checked_announcements = NULL) \arguments{ \item{preferences}{list optional. A list containing the following elements: \itemize{ -\item appIndexOrderField string, Order field for the apps index pages. -\item appIndexOrderDir string, Order direction for the apps index pages. -\item resultIndexOrderField string, Order field for the results index page. -\item resultIndexOrderDir string, Order direction for the results index page. -\item resultIndexTypeFilter string, Type filter for the results index page. -\item resultIndexAuthorFilter string, Author filter for the results index page. -\item resultIndexArchivedFilter string, Archived filter for the results index page. +\item appIndexOrderField string, This attribute is deprecated +\item appIndexOrderDir string, This attribute is deprecated +\item resultIndexOrderField string, Order field for the reports index page. +\item resultIndexOrderDir string, Order direction for the reports index page. +\item resultIndexTypeFilter string, Type filter for the reports index page. +\item resultIndexAuthorFilter string, Author filter for the reports index page. +\item resultIndexArchivedFilter string, Archived filter for the reports index page. \item importIndexOrderField string, Order field for the imports index page. \item importIndexOrderDir string, Order direction for the imports index page. \item importIndexTypeFilter string, Type filter for the imports index page. @@ -80,6 +80,9 @@ users_patch_me(preferences = NULL, last_checked_announcements = NULL) \item serviceOrderDir string, Order direction for the services page. \item serviceAuthorFilter string, Author filter for the services page. \item serviceArchivedFilter string, Archived filter for the services page. +\item assumeRoleHistory string, JSON string of previously assumed roles. +\item defaultSuccessNotificationsOn boolean, Whether email notifications for the success of all applicable jobs are on by default. +\item defaultFailureNotificationsOn boolean, Whether email notifications for the failure of all applicable jobs are on by default. }} \item{last_checked_announcements}{string optional. The date and time at which the user last checked their announcements.} @@ -101,7 +104,9 @@ A list containing the following elements: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. 
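users_patch() above now accepts the new account_status argument alongside active. A short sketch with a placeholder user ID:

    # Deactivate a user account (must be a team or org admin)
    users_patch(id = 1001, account_status = "Deactivated")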
}} \item{organizationName}{string, The name of the organization the user belongs to.} \item{organizationSlug}{string, The slug of the organization the user belongs to.} @@ -111,6 +116,10 @@ A list containing the following elements: \item{assumingRole}{boolean, Whether the user is assuming a role or not.} \item{assumingAdmin}{boolean, Whether the user is assuming admin.} \item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +\item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. The user is not in superadmin mode when set to null.} +\item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} } \description{ Update info about the logged-in user diff --git a/man/users_post.Rd b/man/users_post.Rd index 62c7463e..0c649781 100644 --- a/man/users_post.Rd +++ b/man/users_post.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{users_post} \alias{users_post} -\title{Create a new user (must be an admin or client user admin)} +\title{Create a new user (must be a team or org admin)} \usage{ users_post( name, @@ -35,7 +35,7 @@ users_post( \item{user}{string required. The username of this user.} -\item{active}{boolean optional. The account status of this user.} +\item{active}{boolean optional. Whether this user account is active or deactivated.} \item{city}{string optional. The city of this user.} @@ -71,13 +71,15 @@ A list containing the following elements: \item{user}{string, The username of this user.} \item{name}{string, The name of this user.} \item{email}{string, The email of this user.} -\item{active}{boolean, The account status of this user.} +\item{active}{boolean, Whether this user account is active or deactivated.} \item{primaryGroupId}{integer, The ID of the primary group of this user.} \item{groups}{array, An array containing the following fields: \itemize{ \item id integer, The ID of this group. \item name string, The name of this group. -\item organizationId integer, The organization associated with this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. }} \item{city}{string, The city of this user.} \item{state}{string, The state of this user.} @@ -98,7 +100,16 @@ A list containing the following elements: \item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.} \item{organizationLoginType}{string, The user's organization's login type.} \item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} +\item{createdAt}{string, The date and time when the user was created.} +\item{updatedAt}{string, The date and time when the user was last updated.} +\item{lastSeenAt}{string, The date and time when the user last visited Platform.} +\item{suspended}{boolean, Whether the user is suspended due to inactivity.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.} +\item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"} } \description{ -Create a new user (must be an admin or client user admin) +Create a new user (must be a team or org admin) } diff --git a/man/users_post_me_superadmin.Rd b/man/users_post_me_superadmin.Rd new file mode 100644 index 00000000..2ede00ae --- /dev/null +++ b/man/users_post_me_superadmin.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_post_me_superadmin} +\alias{users_post_me_superadmin} +\title{Enables Superadmin Mode for the current user} +\usage{ +users_post_me_superadmin() +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this user.} +\item{name}{string, This user's name.} +\item{email}{string, This user's email address.} +\item{username}{string, This user's username.} +\item{initials}{string, This user's initials.} +\item{lastCheckedAnnouncements}{string, The date and time at which the user last checked their announcements.} +\item{featureFlags}{list, The feature flag settings for this user.} +\item{roles}{array, The roles this user has, listed by slug.} +\item{preferences}{list, This user's preferences.} +\item{customBranding}{string, The branding of Platform for this user.} +\item{primaryGroupId}{integer, The ID of the primary group of this user.} +\item{groups}{array, An array containing the following fields: +\itemize{ +\item id integer, The ID of this group. +\item name string, The name of this group. +\item slug string, The slug of this group. +\item organizationId integer, The ID of the organization associated with this group. +\item organizationName string, The name of the organization associated with this group. +}} +\item{organizationName}{string, The name of the organization the user belongs to.} +\item{organizationSlug}{string, The slug of the organization the user belongs to.} +\item{organizationDefaultThemeId}{integer, The ID of the organizations's default theme.} +\item{createdAt}{string, The date and time when the user was created.} +\item{signInCount}{integer, The number of times the user has signed in.} +\item{assumingRole}{boolean, Whether the user is assuming a role or not.} +\item{assumingAdmin}{boolean, Whether the user is assuming admin.} +\item{assumingAdminExpiration}{string, When the user's admin role is set to expire.} +\item{superadminModeExpiration}{string, The user is in superadmin mode when set to a DateTime. 
The user is not in superadmin mode when set to null.} +\item{disableNonCompliantFedrampFeatures}{boolean, Whether to disable non-compliant fedramp features.} +\item{createdById}{integer, The ID of the user who created this user.} +\item{lastUpdatedById}{integer, The ID of the user who last updated this user.} +} +\description{ +Enables Superadmin Mode for the current user +} diff --git a/man/users_post_unsuspend.Rd b/man/users_post_unsuspend.Rd new file mode 100644 index 00000000..e9966690 --- /dev/null +++ b/man/users_post_unsuspend.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{users_post_unsuspend} +\alias{users_post_unsuspend} +\title{Unsuspends user} +\usage{ +users_post_unsuspend(id) +} +\arguments{ +\item{id}{integer required. The ID of this user.} +} +\value{ +A list containing the following elements: +\item{id}{integer, The ID of this user.} +\item{user}{string, The username of this user.} +\item{unlockedAt}{string, The time the user's account was unsuspended} +} +\description{ +Unsuspends user +} diff --git a/man/workflows_get.Rd b/man/workflows_get.Rd index fe5b6f23..9ddd35ac 100644 --- a/man/workflows_get.Rd +++ b/man/workflows_get.Rd @@ -30,10 +30,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -51,6 +52,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_get_executions_tasks.Rd b/man/workflows_get_executions_tasks.Rd index f883f05a..364c6579 100644 --- a/man/workflows_get_executions_tasks.Rd +++ b/man/workflows_get_executions_tasks.Rd @@ -22,6 +22,7 @@ A list containing the following elements: \itemize{ \item id integer, The ID of the run. \item jobId integer, The ID of the job associated with the run. +\item myPermissionLevel string, Your permission level on the job. One of "read", "write", "manage", or "nil". \item state string, The state of the run. \item createdAt string, The time that the run was queued. \item startedAt string, The time that the run started. @@ -31,6 +32,7 @@ A list containing the following elements: \itemize{ \item id integer, The ID of the execution. \item workflowId integer, The ID of the workflow associated with the execution. +\item myPermissionLevel string, Your permission level on the workflow. One of "read", "write", "manage", or "nil". 
\item state string, The state of this workflow execution. \item createdAt string, The time this execution was created. \item startedAt string, The time this execution started. diff --git a/man/workflows_get_git_commits.Rd b/man/workflows_get_git_commits.Rd index 0b8465fd..29839c6c 100644 --- a/man/workflows_get_git_commits.Rd +++ b/man/workflows_get_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{workflows_get_git_commits} \alias{workflows_get_git_commits} -\title{Get file contents at commit_hash} +\title{Get file contents at git ref} \usage{ workflows_get_git_commits(id, commit_hash) } @@ -19,5 +19,5 @@ A list containing the following elements: \item{fileHash}{string, The SHA of the file.} } \description{ -Get file contents at commit_hash +Get file contents at git ref } diff --git a/man/workflows_list.Rd b/man/workflows_list.Rd index d8af95d4..724e0ac2 100644 --- a/man/workflows_list.Rd +++ b/man/workflows_list.Rd @@ -21,7 +21,7 @@ workflows_list( \item{archived}{string optional. The archival status of the requested item(s).} -\item{author}{string optional. Author of the workflow. It accepts a comma-separated list of author ids.} +\item{author}{string optional. If specified, return items from any of these authors. It accepts a comma-separated list of user IDs.} \item{state}{array optional. State of the most recent execution.One or more of queued, running, succeeded, failed, cancelled, idle, and scheduled.} @@ -54,10 +54,11 @@ An array containing the following fields: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} diff --git a/man/workflows_list_dependencies.Rd b/man/workflows_list_dependencies.Rd new file mode 100644 index 00000000..6b5386f1 --- /dev/null +++ b/man/workflows_list_dependencies.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{workflows_list_dependencies} +\alias{workflows_list_dependencies} +\title{List dependent objects for this object} +\usage{ +workflows_list_dependencies(id, user_id = NULL) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer optional. 
ID of target user} +} +\value{ +An array containing the following fields: +\item{objectType}{string, Dependent object type} +\item{fcoType}{string, Human readable dependent object type} +\item{id}{integer, Dependent object ID} +\item{name}{string, Dependent object name, or nil if the requesting user cannot read this object} +\item{permissionLevel}{string, Permission level of target user (not user's groups) for dependent object, or null if no target user} +\item{shareable}{boolean, Whether or not the requesting user can share this object.} +} +\description{ +List dependent objects for this object +} diff --git a/man/workflows_list_git.Rd b/man/workflows_list_git.Rd index cbe9de38..626891c3 100644 --- a/man/workflows_list_git.Rd +++ b/man/workflows_list_git.Rd @@ -11,7 +11,7 @@ workflows_list_git(id) } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -21,7 +21,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Get the git metadata attached to an item diff --git a/man/workflows_list_git_commits.Rd b/man/workflows_list_git_commits.Rd index 18c2760e..bce88441 100644 --- a/man/workflows_list_git_commits.Rd +++ b/man/workflows_list_git_commits.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/generated_client.R \name{workflows_list_git_commits} \alias{workflows_list_git_commits} -\title{Get the git commits for an item} +\title{Get the git commits for an item on the current branch} \usage{ workflows_list_git_commits(id) } @@ -17,5 +17,5 @@ A list containing the following elements: \item{message}{string, The commit message.} } \description{ -Get the git commits for an item +Get the git commits for an item on the current branch } diff --git a/man/workflows_patch.Rd b/man/workflows_patch.Rd index 7ad5cec1..69ea4012 100644 --- a/man/workflows_patch.Rd +++ b/man/workflows_patch.Rd @@ -27,10 +27,11 @@ workflows_patch( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. 
}} \item{allow_concurrent_executions}{boolean optional. Whether the workflow can execute when already running.} @@ -70,10 +71,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -91,6 +93,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_patch_git.Rd b/man/workflows_patch_git.Rd new file mode 100644 index 00000000..b120f90f --- /dev/null +++ b/man/workflows_patch_git.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{workflows_patch_git} +\alias{workflows_patch_git} +\title{Update an attached git file} +\usage{ +workflows_patch_git( + id, + git_ref = NULL, + git_branch = NULL, + git_path = NULL, + git_repo_url = NULL, + git_ref_type = NULL, + pull_from_git = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the file.} + +\item{git_ref}{string optional. A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} + +\item{git_branch}{string optional. The git branch that the file is on.} + +\item{git_path}{string optional. The path of the file in the repository.} + +\item{git_repo_url}{string optional. The URL of the git repository.} + +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + +\item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} +} +\value{ +A list containing the following elements: +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} +\item{gitBranch}{string, The git branch that the file is on.} +\item{gitPath}{string, The path of the file in the repository.} +\item{gitRepo}{list, A list containing the following elements: +\itemize{ +\item id integer, The ID for this git repository. +\item repoUrl string, The URL for this git repository. +\item createdAt string, +\item updatedAt string, +}} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. 
Only works for scripts and workflows (assuming you have the feature enabled)} +} +\description{ +Update an attached git file +} diff --git a/man/workflows_post.Rd b/man/workflows_post.Rd index b4b51a3d..1a5acb9e 100644 --- a/man/workflows_post.Rd +++ b/man/workflows_post.Rd @@ -28,10 +28,11 @@ workflows_post( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allow_concurrent_executions}{boolean optional. Whether the workflow can execute when already running.} @@ -73,10 +74,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -94,6 +96,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_post_clone.Rd b/man/workflows_post_clone.Rd index bec58061..889a2aad 100644 --- a/man/workflows_post_clone.Rd +++ b/man/workflows_post_clone.Rd @@ -34,10 +34,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. 
+\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -55,6 +56,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_post_executions.Rd b/man/workflows_post_executions.Rd index f79e6e2f..b32ff30a 100644 --- a/man/workflows_post_executions.Rd +++ b/man/workflows_post_executions.Rd @@ -18,7 +18,7 @@ workflows_post_executions( \item{input}{list optional. Key-value pairs to send to this execution as inputs.} -\item{included_tasks}{array optional. If specified, executes only the subset of workflow tasks included.} +\item{included_tasks}{array optional. If specified, executes only the subset of workflow tasks included as specified by task name.} } \value{ A list containing the following elements: diff --git a/man/workflows_post_git_checkout.Rd b/man/workflows_post_git_checkout.Rd new file mode 100644 index 00000000..62c162b2 --- /dev/null +++ b/man/workflows_post_git_checkout.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{workflows_post_git_checkout} +\alias{workflows_post_git_checkout} +\title{Checkout content that the existing git_ref points to and save to the object} +\usage{ +workflows_post_git_checkout(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout content that the existing git_ref points to and save to the object +} diff --git a/man/workflows_post_git_checkout_latest.Rd b/man/workflows_post_git_checkout_latest.Rd new file mode 100644 index 00000000..c11ba95e --- /dev/null +++ b/man/workflows_post_git_checkout_latest.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{workflows_post_git_checkout_latest} +\alias{workflows_post_git_checkout_latest} +\title{Checkout latest commit on the current branch of a script or workflow} +\usage{ +workflows_post_git_checkout_latest(id) +} +\arguments{ +\item{id}{integer required. The ID of the file.} +} +\value{ +A list containing the following elements: +\item{content}{string, The file's contents.} +\item{type}{string, The file's type.} +\item{size}{integer, The file's size.} +\item{fileHash}{string, The SHA of the file.} +} +\description{ +Checkout latest commit on the current branch of a script or workflow +} diff --git a/man/workflows_put.Rd b/man/workflows_put.Rd index 948e2123..16a5bd9f 100644 --- a/man/workflows_put.Rd +++ b/man/workflows_put.Rd @@ -27,10 +27,11 @@ workflows_put( \item{schedule}{list optional. A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. 
Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allow_concurrent_executions}{boolean optional. Whether the workflow can execute when already running.} @@ -70,10 +71,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -91,6 +93,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_put_archive.Rd b/man/workflows_put_archive.Rd index 2d9ae4f8..1a8d270c 100644 --- a/man/workflows_put_archive.Rd +++ b/man/workflows_put_archive.Rd @@ -32,10 +32,11 @@ A list containing the following elements: \item{schedule}{list, A list containing the following elements: \itemize{ \item scheduled boolean, If the item is scheduled. -\item scheduledDays array, Day based on numeric value starting at 0 for Sunday. +\item scheduledDays array, Days of the week, based on numeric value starting at 0 for Sunday. Mutually exclusive with scheduledDaysOfMonth \item scheduledHours array, Hours of the day it is scheduled on. \item scheduledMinutes array, Minutes of the day it is scheduled on. -\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour. +\item scheduledRunsPerHour integer, Deprecated in favor of scheduled minutes. +\item scheduledDaysOfMonth array, Days of the month it is scheduled on, mutually exclusive with scheduledDays. }} \item{allowConcurrentExecutions}{boolean, Whether the workflow can execute when already running.} \item{timeZone}{string, The time zone of this workflow.} @@ -53,6 +54,7 @@ A list containing the following elements: }} \item{archived}{string, The archival status of the requested item(s).} \item{hidden}{boolean, The hidden status of the item.} +\item{myPermissionLevel}{string, Your permission level on the object. 
One of "read", "write", or "manage".} \item{createdAt}{string, } \item{updatedAt}{string, } } diff --git a/man/workflows_put_git.Rd b/man/workflows_put_git.Rd index a464f1a8..86a77ef4 100644 --- a/man/workflows_put_git.Rd +++ b/man/workflows_put_git.Rd @@ -10,6 +10,7 @@ workflows_put_git( git_branch = NULL, git_path = NULL, git_repo_url = NULL, + git_ref_type = NULL, pull_from_git = NULL ) } @@ -24,11 +25,13 @@ workflows_put_git( \item{git_repo_url}{string optional. The URL of the git repository.} +\item{git_ref_type}{string optional. Specifies if the file is versioned by branch or tag.} + \item{pull_from_git}{boolean optional. Automatically pull latest commit from git. Only works for scripts.} } \value{ A list containing the following elements: -\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, or the full or shortened SHA of a commit.} +\item{gitRef}{string, A git reference specifying an unambiguous version of the file. Can be a branch name, tag or the full or shortened SHA of a commit.} \item{gitBranch}{string, The git branch that the file is on.} \item{gitPath}{string, The path of the file in the repository.} \item{gitRepo}{list, A list containing the following elements: @@ -38,7 +41,8 @@ A list containing the following elements: \item createdAt string, \item updatedAt string, }} -\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts.} +\item{gitRefType}{string, Specifies if the file is versioned by branch or tag.} +\item{pullFromGit}{boolean, Automatically pull latest commit from git. Only works for scripts and workflows (assuming you have the feature enabled)} } \description{ Attach an item to a file in a git repo diff --git a/man/workflows_put_transfer.Rd b/man/workflows_put_transfer.Rd new file mode 100644 index 00000000..e1b5fe8e --- /dev/null +++ b/man/workflows_put_transfer.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/generated_client.R +\name{workflows_put_transfer} +\alias{workflows_put_transfer} +\title{Transfer ownership of this object to another user} +\usage{ +workflows_put_transfer( + id, + user_id, + include_dependencies, + email_body = NULL, + send_email = NULL +) +} +\arguments{ +\item{id}{integer required. The ID of the resource that is shared.} + +\item{user_id}{integer required. ID of target user} + +\item{include_dependencies}{boolean required. Whether or not to give manage permissions on all dependencies} + +\item{email_body}{string optional. Custom body text for e-mail sent on transfer.} + +\item{send_email}{boolean optional. 
Send email to the target user of the transfer?} +} +\value{ +A list containing the following elements: +\item{dependencies}{array, An array containing the following fields: +\itemize{ +\item objectType string, Dependent object type +\item fcoType string, Human readable dependent object type +\item id integer, Dependent object ID +\item name string, Dependent object name, or nil if the requesting user cannot read this object +\item permissionLevel string, Permission level of target user (not user's groups) for dependent object, or null if no target user +\item shared boolean, Whether dependent object was successfully shared with target user +}} +} +\description{ +Transfer ownership of this object to another user +} From da8685aeca36911dde76f3468123e309f9cb9ddd Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 13 Feb 2023 14:27:24 -0600 Subject: [PATCH 03/22] re-build documentation --- Meta/vignette.rds | Bin 0 -> 354 bytes doc/civis_ml.R | 172 +++++++ doc/civis_ml.Rmd | 357 +++++++++++++ doc/civis_ml.html | 874 ++++++++++++++++++++++++++++++++ doc/civis_scripts.R | 199 ++++++++ doc/civis_scripts.Rmd | 382 ++++++++++++++ doc/civis_scripts.html | 697 +++++++++++++++++++++++++ doc/concurrency.R | 86 ++++ doc/concurrency.Rmd | 157 ++++++ doc/concurrency.html | 499 ++++++++++++++++++ doc/data_import_and_export.R | 81 +++ doc/data_import_and_export.Rmd | 240 +++++++++ doc/data_import_and_export.html | 551 ++++++++++++++++++++ doc/quick_start.R | 57 +++ doc/quick_start.Rmd | 103 ++++ doc/quick_start.html | 441 ++++++++++++++++ 16 files changed, 4896 insertions(+) create mode 100644 Meta/vignette.rds create mode 100644 doc/civis_ml.R create mode 100644 doc/civis_ml.Rmd create mode 100644 doc/civis_ml.html create mode 100644 doc/civis_scripts.R create mode 100644 doc/civis_scripts.Rmd create mode 100644 doc/civis_scripts.html create mode 100644 doc/concurrency.R create mode 100644 doc/concurrency.Rmd create mode 100644 doc/concurrency.html create mode 100644 doc/data_import_and_export.R create mode 100644 doc/data_import_and_export.Rmd create mode 100644 doc/data_import_and_export.html create mode 100644 doc/quick_start.R create mode 100644 doc/quick_start.Rmd create mode 100644 doc/quick_start.html diff --git a/Meta/vignette.rds b/Meta/vignette.rds new file mode 100644 index 0000000000000000000000000000000000000000..2280d0cfdfc22b24d973491caddd96d567295ce5 GIT binary patch literal 354 zcmV-o0iFIIiwFP!000001C>+3PQx$^&Dw5k6BC0SKyaz}fc*nv8bTbH7zb`ul!i5; zO-7PZ%8CC*Vz_Hk(n*ux5XXM8J1T=zHb5g|9gyFOkyNz-*P_>dc%b>zuT-V&*TOK!5 z>*~T~#f0wi7&T%fx*>DYR>L_NyK*@A#U4G}Qdf{I-exlV9ZREqdngTMcO(Wp+cywf z)EWl>lWpso0{X-L&-}mx(=pAv9A{LS!=3?yIZLTccx1W>{Kaf;+>WJxFr#bAWBUgB zceE^ogt*;kAVo2?%|_K<_yn`rY`%6GkrXS-$b!R4!|5};CkoN`58c8s8%+ZM0GkM? Al>h($ literal 0 HcmV?d00001 diff --git a/doc/civis_ml.R b/doc/civis_ml.R new file mode 100644 index 00000000..567742b0 --- /dev/null +++ b/doc/civis_ml.R @@ -0,0 +1,172 @@ +## ---- eval = FALSE------------------------------------------------------------ +# library(civis) +# +# civis_ml(df, ...) +# civis_ml("path/to/data.csv", ...) +# civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...) +# civis_ml(civis_file(1234), ...) 
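(As an aside, a minimal sketch of the `sql_where` and `sql_limit` arguments that `civis_table()` also accepts for restricting the rows used in training; the table name, filter, and limit below are placeholders rather than values from the vignette.)

```r
library(civis)

# Hypothetical values -- substitute your own schema.table and SQL fragment.
tab <- civis_table(table_name = "schema.table",
                   sql_where  = "signup_date >= '2017-01-01'",
                   sql_limit  = 100000)
```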
+ +## ---- eval = FALSE----------------------------------------------------------- +# options(civis.default_db = "my_database") +# tab <- civis_table(table_name = "sample_project.premium_training_set") + +## ---- eval = FALSE------------------------------------------------------------ +# library(civis) +# tab <- civis_table("sample_project.premium_training_set") +# m <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_forest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip") +# +# m <- civis_ml_random_forest_classifier(tab, +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip") + +## ---- eval = FALSE------------------------------------------------------------ +# tab <- civis_table("sample_project.premium_training_set") +# +# # hyperband +# m_hyper <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_forest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip", +# cross_validation_parameters = 'hyperband') +# +# # grid search +# cv_params <- list("max_depth" = c(2, 3, 5), +# "n_estimators" = c(50, 100, 500)) +# +# m_grid <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_forest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip", +# cross_validation_parameters = cv_params) +# +# + +## ---- eval = FALSE------------------------------------------------------------ +# m_stack <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "stacking_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip") + +## ---- eval=FALSE-------------------------------------------------------------- +# m + +## ----run_model, eval=FALSE, echo=FALSE---------------------------------------- +# # use this chunk to actually update the model if necessary +# library(civis) +# tab <- civis_table("sample_project.premium_training_set") +# cv_params <- list("max_depth" = c(2, 3, 5), +# "n_estimators" = c(50, 100, 500)) +# +# +# m <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_forest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip", +# cross_validation_parameters = cv_params) +# saveRDS(m, file = "../inst/civis_ml_brandable.rds") +# +# oos <- fetch_oos_scores(m) +# saveRDS(oos, file = "../inst/civis_ml_oos.rds") +# +# err_m <- tryCatch({ +# civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_fest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip", +# cross_validation_parameters = cv_params) +# }, error = function(e) e) +# saveRDS(err_m, file = "../inst/civis_ml_err.rds") +# + +## ---- eval=TRUE, echo=FALSE--------------------------------------------------- +library(civis) +path <- system.file("civis_ml_brandable.rds", package = 'civis') +m <- readRDS(path) +m + +## ----------------------------------------------------------------------------- +get_metric(m, "accuracy") +get_metric(m, "confusion_matrix") +get_metric(m, "roc_auc") + +## ---- echo=TRUE, eval=FALSE--------------------------------------------------- +# oos <- fetch_oos_scores(m) +# head(oos) + +## ---- echo=FALSE, eval=TRUE--------------------------------------------------- +path <- system.file("civis_ml_oos.rds", package = 'civis') +oos <- readRDS(path) +head(oos) + +## ---- fig.width = 5----------------------------------------------------------- +plot(m) + +## 
----------------------------------------------------------------------------- +hist(m) + +## ---- eval=FALSE-------------------------------------------------------------- +# pred_tab <- civis_table(table_name = "sample_project.brandable_all_users") +# pred_job <- predict(m, newdata = pred_tab, +# output_table = "sample_project.brandable_user_scores") + +## ---- eval=FALSE-------------------------------------------------------------- +# pred_job <- predict(m, newdata = pred_tab, +# output_table = "sample_project.brandable_user_scores", +# n_jobs = 25) + +## ---- eval=FALSE-------------------------------------------------------------- +# yhat <- fetch_predictions(pred_job) + +## ---- eval=FALSE-------------------------------------------------------------- +# # download from S3 +# download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv") +# +# # download from Redshift +# download_civis("sample_project.brandable_user_scores") + +## ---- eval=FALSE-------------------------------------------------------------- +# model_id <- m$job$id +# m <- civis_ml_fetch_existing(model_id) + +## ---- eval=FALSE-------------------------------------------------------------- +# civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_fest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip", +# cross_validation_parameters = cv_params) + +## ---- echo=FALSE, eval=TRUE--------------------------------------------------- +path <- system.file("civis_ml_err.rds", package = 'civis') +err <- readRDS(path) +err + +## ---- eval = FALSE------------------------------------------------------------ +# e <- tryCatch({ +# civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_fest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip") +# }, civis_ml_error = function(e) e) +# get_error(e) +# fetch_logs(e) + +## ---- eval=FALSE-------------------------------------------------------------- +# retry_model <- function(max_retries = 5) { +# i <- 1 +# while (i < max_retries) { +# tryCatch({ +# m <- civis_ml(tab, dependent_variable = "upgrade", +# model_type = "random_forest_classifier", +# primary_key = "brandable_user_id", +# excluded_columns = "residential_zip") +# return(m) +# }, civis_ml_error = function(e) stop(e)) +# cat("Retry: ", i, fill = TRUE) +# i <- i + 1 +# } +# stop("Exceeded maximum retries.") +# } + diff --git a/doc/civis_ml.Rmd b/doc/civis_ml.Rmd new file mode 100644 index 00000000..0887554e --- /dev/null +++ b/doc/civis_ml.Rmd @@ -0,0 +1,357 @@ +--- +title: "Machine Learning in R with CivisML" +author: "Patrick Miller and Liz Sander" +date: "2018-1-18" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{civis_ml} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +There are so many models to build! When this becomes challenging on a local machine, offloading model building to the cloud can save a lot of time and effort. + +[CivisML](https://medium.com/civis-analytics/civisml-scikit-learn-at-scale-b01b496916ea) is a machine learning service on Civis Platform that makes this as painless as possible. You can fit many different models, do extensive hyperparameter tuning, and score data sets with millions of observations stored in remote databases. Once these models are built, they live in Civis Platform permanently and can be included into production pipelines. Results can be easily incorporated into reports and dashboards. 
+ +CivisML is built in Python using [scikit-learn](http://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. + +While `civis_ml` is a complex function with many arguments, basic machine learning modeling and scoring can be easily carried out. We illustrate several features of `civis_ml` with data from a fictitious company called Brandable, who is looking to predict which customers are likely to upgrade from the free to the premium service. + +## Data sources + +The first step of modeling with `civis_ml` is to specify the data source, which is the first argument. `civis_ml` works with local data frames, a CSV on local disk, [feather-format](https://github.com/wesm/feather) files, tables in Redshift, and files on S3 (the files endpoint): + +```{r, eval = FALSE} +library(civis) + +civis_ml(df, ...) +civis_ml("path/to/data.csv", ...) +civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...) +civis_ml(civis_file(1234), ...) +``` + +The Brandable data is located in a Redshift table called `sample_project.premium_training_set`. + +```{r, eval = FALSE} +options(civis.default_db = "my_database") +tab <- civis_table(table_name = "sample_project.premium_training_set") +``` + +Note that `civis_table` only returns information on where to find the data for `civis_ml`, not the data itself. `civis_table` also takes two SQL statements that can be useful for limiting the rows used for training: `sql_where`, and `sql_limit`. + + +## Modeling + +After the data source is specified, we next choose the model type. There are 13 named CivisML models that can be called from `civis_ml`, 6 for classification and 7 for regression. The name of the model corresponds to the name of the estimator in scikit-learn. It can be given in the `model_type` argument of `civis_ml`, or called directly using a `civis_ml_*` function such as `civis_ml_sparse_logistic`. 
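As a quick sketch of that equivalence (here `tab` is the `civis_table()` source defined above, and the column name "upgrade" is only a placeholder; the Brandable example further below shows the same pattern with a random forest):

```r
library(civis)

# These two calls request the same CivisML workflow:
m1 <- civis_ml(tab, dependent_variable = "upgrade",
               model_type = "sparse_logistic")
m2 <- civis_ml_sparse_logistic(tab, dependent_variable = "upgrade")
```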
+ +| Name | R Workflow | Model Type | scikit-learn Documentation | +|------|:-----------|------------|-----------|------------------| + `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | + `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | + `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | + `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| + `multilayer_perceptron_classifier` | | classification | [muffnn.MLPClassifier](https://github.com/civisanalytics/muffnn) | + `stacking_classifier` | | classification | [StackedClassifier](https://github.com/civisanalytics/civisml-extensions) + `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | + `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | + `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | [GradientBoostingRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | + `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| + `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| +`multilayer_perceptron_regressor` | | regression | [muffnn.MLPRegressor](https://github.com/civisanalytics/muffnn) | +`stacking_regressor` | | regression | [StackedRegressor](https://github.com/civisanalytics/civisml-extensions) + +Documentation on the meta parameters specific to each estimator are provided in `?civis_ml_*`. For example, the regularization strength parameter `C` of `sparse_logistic` is documented in `?civis_ml_sparse_logistic`. + +For the Brandable data, we use a `random_forest` classifier to predict the probability that a customer upgrades from free to premium services. For efficiency, we can also denote a `primary_key`, and a set of `excluded_columns` that are not included in the model: + +```{r, eval = FALSE} +library(civis) +tab <- civis_table("sample_project.premium_training_set") +m <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_forest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip") + +m <- civis_ml_random_forest_classifier(tab, + primary_key = "brandable_user_id", + excluded_columns = "residential_zip") +``` + +Note that if the dependent variables have null values, those rows will be removed before modeling. + +### Hyperparameter Tuning + +You can tune hyperparameters using one of two methods: grid search or hyperband. 
CivisML will perform grid search if you pass a named list of hyperparameters and candidate values to `cross_validation_parameters`. By default, hyperparameter tuning will run in parallel, using as many jobs as possible without overloading your computing cluster. If you wish to have more control over the number of jobs running at once, you can set it using the `n_jobs` parameter. + +[Hyperband](https://arxiv.org/abs/1603.06560) is an efficient approach to hyperparameter optimization, and recommended over grid search where possible. CivisML will perform hyperband optimization if you pass the string `"hyperband"` to `cross_validation_parameters`. Hyperband cannot be used to tune GLMs. For this reason, preset GLMs do not have a hyperband option. Hyperband is supported for random forests, gradient boosted trees, extra trees, multilayer perceptrons, and the random forest and gradient boosted tree steps of stacking. It is highly recommended that multilayer perceptron models only be used with hyperband. + +For the `random_forest_classifier` in the Brandable data, we try both `"hyperband"` and grid search for hyperparameter optimization. + +```{r, eval = FALSE} +tab <- civis_table("sample_project.premium_training_set") + +# hyperband +m_hyper <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_forest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip", + cross_validation_parameters = 'hyperband') + +# grid search +cv_params <- list("max_depth" = c(2, 3, 5), + "n_estimators" = c(50, 100, 500)) + +m_grid <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_forest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip", + cross_validation_parameters = cv_params) + + +``` + +CivisML runs pre-defined models with hyperband using the following distributions: + + +| Models | Cost Parameter | Hyperband Distributions | +| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- | +| gradient_boosting_classifier
gradient_boosting_regressor <br> GBT step in stacking_classifier <br> GBT step in stacking_regressor | `n_estimators` <br> `min = 100,` <br> `max = 1000` | `max_depth: randint(low=1, high=5)` <br> `max_features: [None, 'sqrt', 'log2', 0.5, 0.3, 0.1, 0.05, 0.01]` <br> `learning_rate: truncexpon(b=5, loc=.0003, scale=1./167.)` |
+| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- |
+| random_forest_classifier <br> random_forest_regressor <br> extra_trees_classifier <br> extra_trees_regressor <br> RF step in stacking_classifier <br> RF step in stacking_regressor | `n_estimators` <br> `min = 100,` <br> `max = 1000` | `criterion: ['gini', 'entropy']` <br> `max_features: truncexpon(b=10., loc=.01, scale=1./10.11)` <br> `max_depth: [1, 2, 3, 4, 6, 10, None]` |
+| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- |
+| multilayer_perceptron_classifier <br> multilayer_perceptron_regressor | `n_epochs` <br> `min = 5,` <br> `max = 50` | `keep_prob: uniform()` <br> `hidden_units: [(), (16,), (32,), (64,), (64, 64), (64, 64, 64), (128,), (128, 128), (128, 128, 128), (256,), (256, 256), (256, 256, 256), (512, 256, 128, 64), (1024, 512, 256, 128)]` <br>
`learning_rate: [1e-2, 2e-2, 5e-2, 8e-2, 1e-3, 2e-3, 5e-3, 8e-3, 1e-4]` | + +The truncated exponential distribution for the gradient boosting +classifier and regressor was chosen to skew the distribution toward +small values, ranging between .0003 and .03, with a mean close to +.006. Similarly, the truncated exponential distribution for the random +forest and extra trees models skews toward small values, ranging +between .01 and 1, and with a mean close to .1. + +### Stacking + +The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a +`glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, +scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, +tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). + +```{r, eval = FALSE} +m_stack <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "stacking_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip") +``` + +## Results + +A simple summary of the results from the best fitting model is provided with `print`: + +```{r, eval=FALSE} +m +``` + + +```{r run_model, eval=FALSE, echo=FALSE} +# use this chunk to actually update the model if necessary +library(civis) +tab <- civis_table("sample_project.premium_training_set") +cv_params <- list("max_depth" = c(2, 3, 5), + "n_estimators" = c(50, 100, 500)) + + +m <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_forest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip", + cross_validation_parameters = cv_params) +saveRDS(m, file = "../inst/civis_ml_brandable.rds") + +oos <- fetch_oos_scores(m) +saveRDS(oos, file = "../inst/civis_ml_oos.rds") + +err_m <- tryCatch({ + civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_fest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip", + cross_validation_parameters = cv_params) + }, error = function(e) e) +saveRDS(err_m, file = "../inst/civis_ml_err.rds") + +``` + +```{r, eval=TRUE, echo=FALSE} +library(civis) +path <- system.file("civis_ml_brandable.rds", package = 'civis') +m <- readRDS(path) +m +``` +Following the link takes you to a summary of the model results in Civis Platform. Additional metrics can be computed with `get_metric`: + +```{r} +get_metric(m, "accuracy") +get_metric(m, "confusion_matrix") +get_metric(m, "roc_auc") +``` + +Out of sample (or out of fold) scores used in training can be retrieved using `fetch_oos_scores`: + +```{r, echo=TRUE, eval=FALSE} +oos <- fetch_oos_scores(m) +head(oos) +``` + +```{r, echo=FALSE, eval=TRUE} +path <- system.file("civis_ml_oos.rds", package = 'civis') +oos <- readRDS(path) +head(oos) +``` + +## Diagnostics + +For classification problems, `plot` produces a a decile plot using `ggplot2`. 
For the premium upgrade model, the decile plot shows that the top-scoring 10\% of individuals contain 2.20 times as many targets (people who upgraded) as a randomly selected list of the same size. + +```{r, fig.width = 5} +plot(m) +``` + +For regression problems, `plot` produces a binned scatter-plot of $y$ against $\hat{y}$. + +`hist` shows the histogram of out of sample (out of fold scores), also using `ggplot2`: + +```{r} +hist(m) +``` + + +## Prediction and Scoring + +CivisML can also be used to score models on hundreds of millions of rows, and distributed over many compute instances. Like many estimators in R, this is done through a `predict` method. The `newdata` argument of `predict` can take any data source supported in `civis_ml`. Here we use a table in Redshift containing all Brandable users, and output the result to another table in Redshift: + +```{r, eval=FALSE} +pred_tab <- civis_table(table_name = "sample_project.brandable_all_users") +pred_job <- predict(m, newdata = pred_tab, + output_table = "sample_project.brandable_user_scores") +``` + +Like training and validation, scoring is distributed by default, using up to 90 percent of your computing cluster resources. If you would like to have more control over the number of jobs that are run at once, you can set a maximum using `n_jobs`: + +```{r, eval=FALSE} +pred_job <- predict(m, newdata = pred_tab, + output_table = "sample_project.brandable_user_scores", + n_jobs = 25) +``` + +The predictions can be loaded into memory using `fetch_predictions`, which downloads directly from S3: + +```{r, eval=FALSE} +yhat <- fetch_predictions(pred_job) +``` + +Note that if the table of predictions exceeds available memory, it may be helpful to use `download_civis` instead. + +```{r, eval=FALSE} +# download from S3 +download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv") + +# download from Redshift +download_civis("sample_project.brandable_user_scores") +``` + +## Retrieving Existing models + +An existing model (or particular run of an existing model) can be retrieved using `civis_ml_fetch_existing`: + +```{r, eval=FALSE} +model_id <- m$job$id +m <- civis_ml_fetch_existing(model_id) +``` + +## Error Handling + +Unfortunately, many kinds of errors can occur. When an error occurs within CivisML, a `civis_ml_error` is thrown. By default, the log from the CivisML job is printed, which is useful for debugging. + +Here is an example error from misspelling the model type: + +```{r, eval=FALSE} +civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_fest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip", + cross_validation_parameters = cv_params) +``` + +```{r, echo=FALSE, eval=TRUE} +path <- system.file("civis_ml_err.rds", package = 'civis') +err <- readRDS(path) +err +``` + +If you don't understand the error message, providing the error message, job, and run ids to support is the best way to get help! + +## Programming with `civis_ml` + +When programming with `civis_ml`, errors can be caught using the base R `try` or `tryCatch`. In `civis`, we provide functions for getting debugging information using `get_error` or just the logs using `fetch_logs`. 
+ +```{r, eval = FALSE} +e <- tryCatch({ + civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_fest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip") + }, civis_ml_error = function(e) e) +get_error(e) +fetch_logs(e) +``` + +Error handling can be used to implement more robust workflow programming with `civis_ml`. In the following function, we implement `retry_model`, which retries on e.g. connection failures but not on a `civis_ml_error`. + +```{r, eval=FALSE} +retry_model <- function(max_retries = 5) { + i <- 1 + while (i < max_retries) { + tryCatch({ + m <- civis_ml(tab, dependent_variable = "upgrade", + model_type = "random_forest_classifier", + primary_key = "brandable_user_id", + excluded_columns = "residential_zip") + return(m) + }, civis_ml_error = function(e) stop(e)) + cat("Retry: ", i, fill = TRUE) + i <- i + 1 + } + stop("Exceeded maximum retries.") +} +``` + +Workflow programming could be further enhanced by printing the logs, storing the error object, or writing error logs to a file or database. + +## Appendix + +### Parallelization + +To fit many models in parallel using `parallel`, `foreach`, or `future`, check out [this article](https://civisanalytics.github.io/civis-r/articles/concurrency.html) or the vignette on concurrency at `browseVignettes("civis")`. + +### Sample weights + +Many estimators take a `sample_weight` argument. This can be be specified with the `fit_params` argument of `civis_ml` using `list(sample_weight = 'survey_weight_column')`. + +### Missing data + +Modeling data must be complete. Any missing values will be imputed with the mean of non-null values in a column. + +### CivisML Versions + +By default, CivisML uses its latest version in production. +If you would like a specific version +(e.g., for a production pipeline where pinning the CivisML version is desirable), +both `civis_ml` and the `civis_ml_*` functions have the optional parameter +``civisml_version`` that accepts a string, e.g., ``'v2.3'`` +for CivisML v2.3. Please see [here](https://civis.zendesk.com/hc/en-us/articles/360000260011-CivisML) for the list of CivisML versions. + +### More information + +Custom estimators can be written in Python and included in CivisML if they follow the scikit-learn API. For example, the `sparse_logistic`, `sparse_linear_regressor`, and `sparse_ridge_regressor` models all use the public Civis Analytics [glmnet](https://github.com/civisanalytics/python-glmnet) wrapper in Python. + +Browse [the CivisML documentation](https://civis-python.readthedocs.io/en/stable/ml.html) for more details! + diff --git a/doc/civis_ml.html b/doc/civis_ml.html new file mode 100644 index 00000000..82ac646b --- /dev/null +++ b/doc/civis_ml.html @@ -0,0 +1,874 @@ + + + + + + + + + + + + + + + +Machine Learning in R with CivisML + + + + + + + + + + + + + + + + + + + + + + + + + + +

Machine Learning in R with CivisML

+

Patrick Miller and Liz Sander

+

2018-1-18

+ + + +

There are so many models to build! When this becomes challenging on a +local machine, offloading model building to the cloud can save a lot of +time and effort.

+

CivisML +is a machine learning service on Civis Platform that makes this as +painless as possible. You can fit many different models, do extensive +hyperparameter tuning, and score data sets with millions of observations +stored in remote databases. Once these models are built, they live in +Civis Platform permanently and can be included into production +pipelines. Results can be easily incorporated into reports and +dashboards.

+

CivisML is built in Python using scikit-learn, and leverages +AWS behind the scenes for efficient distributed computing. However, most +of its features can be used through R without knowledge of Python or AWS +with the civis_ml function in civis.

+

While civis_ml is a complex function with many +arguments, basic machine learning modeling and scoring can be easily +carried out. We illustrate several features of civis_ml +with data from a fictitious company called Brandable, who is looking to +predict which customers are likely to upgrade from the free to the +premium service.

+
+

Data sources

+

The first step of modeling with civis_ml is to specify +the data source, which is the first argument. civis_ml +works with local data frames, a CSV on local disk, feather-format files, tables +in Redshift, and files on S3 (the files endpoint):

+
library(civis)
+
+civis_ml(df, ...)
+civis_ml("path/to/data.csv", ...)
+civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...)
+civis_ml(civis_file(1234), ...)
+

The Brandable data is located in a Redshift table called +sample_project.premium_training_set.

+
options(civis.default_db = "my_database")
+tab <- civis_table(table_name = "sample_project.premium_training_set")
+

Note that civis_table only returns information on where to find the data for civis_ml, not the data itself. civis_table also takes two SQL arguments that can be useful for limiting the rows used for training: sql_where and sql_limit.
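As a minimal sketch of how those arguments might look (the filter column and the row cap are purely illustrative and are not documented columns of this table):

```{r, eval = FALSE}
# Illustrative only: restrict the training rows with sql_where and cap the
# number of rows with sql_limit. signup_year is a hypothetical column.
tab_recent <- civis_table(table_name = "sample_project.premium_training_set",
                          sql_where = "signup_year >= 2016",
                          sql_limit = 100000)
```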

+
+
+

Modeling

+

After the data source is specified, we next choose the model type. +There are 13 named CivisML models that can be called from +civis_ml, 6 for classification and 7 for regression. The +name of the model corresponds to the name of the estimator in +scikit-learn. It can be given in the model_type argument of +civis_ml, or called directly using a +civis_ml_* function such as +civis_ml_sparse_logistic.

| Name | R Workflow | Model Type | scikit-learn Documentation |
|------|------------|------------|-----------------------------|
| sparse_logistic | civis_ml_sparse_logistic | classification | LogisticRegression |
| gradient_boosting_classifier | civis_ml_gradient_boosting_classifier | classification | GradientBoostingClassifier |
| random_forest_classifier | civis_ml_random_forest_classifier | classification | RandomForestClassifier |
| extra_trees_classifier | civis_ml_extra_trees_classifier | classification | ExtraTreesClassifier |
| multilayer_perceptron_classifier | | classification | muffnn.MLPClassifier |
| stacking_classifier | | classification | StackedClassifier |
| sparse_linear_regressor | civis_ml_sparse_linear_regressor | regression | LinearRegression |
| sparse_ridge_regressor | civis_ml_sparse_ridge_regressor | regression | Ridge |
| gradient_boosting_regressor | civis_ml_gradient_boosting_regressor | regression | GradientBoostingRegressor |
| random_forest_regressor | civis_ml_random_forest_regressor | regression | RandomForestRegressor |
| extra_trees_regressor | civis_ml_extra_trees_regressor | regression | ExtraTreesRegressor |
| multilayer_perceptron_regressor | | regression | muffnn.MLPRegressor |
| stacking_regressor | | regression | StackedRegressor |

Documentation on the meta parameters specific to each estimator is provided in ?civis_ml_*. For example, the regularization strength parameter C of sparse_logistic is documented in ?civis_ml_sparse_logistic.
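For instance, a hedged sketch of setting one of those parameters through its wrapper (the value of C below is arbitrary and only for illustration):

```{r, eval = FALSE}
# Sketch: pass the regularization strength C of sparse_logistic directly to
# its civis_ml_* wrapper. C = 0.1 is an arbitrary illustrative value.
m_logit <- civis_ml_sparse_logistic(tab, dependent_variable = "upgrade",
                                    primary_key = "brandable_user_id",
                                    excluded_columns = "residential_zip",
                                    C = 0.1)
```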

+

For the Brandable data, we use a random_forest +classifier to predict the probability that a customer upgrades from free +to premium services. For efficiency, we can also denote a +primary_key, and a set of excluded_columns +that are not included in the model:

+
library(civis)
+tab <- civis_table("sample_project.premium_training_set")
+m   <- civis_ml(tab, dependent_variable = "upgrade",
+                model_type = "random_forest_classifier",
+                primary_key = "brandable_user_id",
+                excluded_columns = "residential_zip")
+
+m <- civis_ml_random_forest_classifier(tab,
+      primary_key = "brandable_user_id",
+      excluded_columns = "residential_zip")
+

Note that if the dependent variables have null values, those rows +will be removed before modeling.

+
+

Hyperparameter Tuning

+

You can tune hyperparameters using one of two methods: grid search or +hyperband. CivisML will perform grid search if you pass a named list of +hyperparameters and candidate values to +cross_validation_parameters. By default, hyperparameter +tuning will run in parallel, using as many jobs as possible without +overloading your computing cluster. If you wish to have more control +over the number of jobs running at once, you can set it using the +n_jobs parameter.
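As a sketch, assuming n_jobs is passed directly to civis_ml alongside the grid of candidate values used later in this section (the cap of 10 concurrent jobs is arbitrary):

```{r, eval = FALSE}
# Sketch: grid search with an explicit cap on concurrent tuning jobs.
# The grid matches the example below; the cap of 10 jobs is arbitrary.
cv_params <- list("max_depth" = c(2, 3, 5),
                  "n_estimators" = c(50, 100, 500))

m_grid <- civis_ml(tab, dependent_variable = "upgrade",
                   model_type = "random_forest_classifier",
                   primary_key = "brandable_user_id",
                   excluded_columns = "residential_zip",
                   cross_validation_parameters = cv_params,
                   n_jobs = 10)
```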

+

Hyperband is an +efficient approach to hyperparameter optimization, and recommended over +grid search where possible. CivisML will perform hyperband optimization +if you pass the string "hyperband" to +cross_validation_parameters. Hyperband cannot be used to +tune GLMs. For this reason, preset GLMs do not have a hyperband option. +Hyperband is supported for random forests, gradient boosted trees, extra +trees, multilayer perceptrons, and the random forest and gradient +boosted tree steps of stacking. It is highly recommended that multilayer +perceptron models only be used with hyperband.

+

For the random_forest_classifier in the Brandable data, +we try both "hyperband" and grid search for hyperparameter +optimization.

+
tab <- civis_table("sample_project.premium_training_set")
+
+# hyperband
+m_hyper <- civis_ml(tab, dependent_variable = "upgrade",
+              model_type = "random_forest_classifier",
+              primary_key = "brandable_user_id",
+              excluded_columns = "residential_zip",
+              cross_validation_parameters = 'hyperband')
+
+# grid search
+cv_params <- list("max_depth" = c(2, 3, 5),
+                  "n_estimators" = c(50, 100, 500))
+
+m_grid <- civis_ml(tab, dependent_variable = "upgrade",
+              model_type = "random_forest_classifier",
+              primary_key = "brandable_user_id",
+              excluded_columns = "residential_zip",
+              cross_validation_parameters = cv_params)
+

CivisML runs pre-defined models with hyperband using the following +distributions:

| Models | Cost Parameter | Hyperband Distributions |
|--------|----------------|--------------------------|
| gradient_boosting_classifier<br>gradient_boosting_regressor<br>GBT step in stacking_classifier<br>GBT step in stacking_regressor | n_estimators<br>min = 100, max = 1000 | max_depth: randint(low=1, high=5)<br>max_features: [None, 'sqrt', 'log2', 0.5, 0.3, 0.1, 0.05, 0.01]<br>learning_rate: truncexpon(b=5, loc=.0003, scale=1./167.) |
| random_forest_classifier<br>random_forest_regressor<br>extra_trees_classifier<br>extra_trees_regressor<br>RF step in stacking_classifier<br>RF step in stacking_regressor | n_estimators<br>min = 100, max = 1000 | criterion: ['gini', 'entropy']<br>max_features: truncexpon(b=10., loc=.01, scale=1./10.11)<br>max_depth: [1, 2, 3, 4, 6, 10, None] |
| multilayer_perceptron_classifier<br>multilayer_perceptron_regressor | n_epochs<br>min = 5, max = 50 | keep_prob: uniform()<br>hidden_units: [(), (16,), (32,), (64,), (64, 64), (64, 64, 64), (128,), (128, 128), (128, 128, 128), (256,), (256, 256), (256, 256, 256), (512, 256, 128, 64), (1024, 512, 256, 128)]<br>learning_rate: [1e-2, 2e-2, 5e-2, 8e-2, 1e-3, 2e-3, 5e-3, 8e-3, 1e-4] |

The truncated exponential distribution for the gradient boosting +classifier and regressor was chosen to skew the distribution toward +small values, ranging between .0003 and .03, with a mean close to .006. +Similarly, the truncated exponential distribution for the random forest +and extra trees models skews toward small values, ranging between .01 +and 1, and with a mean close to .1.

+
+
+

Stacking

+

The "stacking_classifier" model stacks together the +"gradient_boosting_classifier" and +"random_forest_classifier" predefined models together with +a +glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss'). +Defaults for the predefined models are documented in +?civis_ml. Each column is first standardized, +and then the model predictions are combined using LogisticRegressionCV +with penalty='l2' and tol=1e-08. The +"stacking_regressor" works similarly, stacking together the +"gradient_boosting_regressor" and +"random_forest_regressor" models and a +glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2'), +combining them using NonNegativeLinearRegression.

+
m_stack <- civis_ml(tab, dependent_variable = "upgrade",
+              model_type = "stacking_classifier",
+              primary_key = "brandable_user_id",
+              excluded_columns = "residential_zip")
+
+
+
+

Results

+

A simple summary of the results from the best fitting model is +provided with print:

+
m
+
## <CivisML random_forest_classifier>
+## https://platform.civisanalytics.com/#/models/7072485
+## Job id:  7072485  Run id:  58251183 
+## 
+## AUC:  0.8009
+## upgrade:
+##                  0      1
+## Prop Correct 0.923 0.3297
+

Following the link takes you to a summary of the model results in +Civis Platform. Additional metrics can be computed with +get_metric:

+
get_metric(m, "accuracy")
+
## [1] 0.761
+
get_metric(m, "confusion_matrix")
+
##      [,1] [,2]
+## [1,]  671   56
+## [2,]  183   90
+
get_metric(m, "roc_auc")
+
## [1] 0.8009004
+

Out of sample (or out of fold) scores used in training can be +retrieved using fetch_oos_scores:

+
oos <- fetch_oos_scores(m)
+head(oos)
+
##   brandable_user_id upgrade_1
+## 1   00214b9181f2347    0.3280
+## 2   0b6cbd77cb8d98b    0.7140
+## 3   12ca082b063a3bf    0.4480
+## 4   130060adea791e8    0.2060
+## 5   1495366621d3834    0.3152
+## 6   1a8ed19916ae7c2    0.1600
+
+
+

Diagnostics

+

For classification problems, plot produces a decile plot using ggplot2. For the premium upgrade model, the decile plot shows that the top-scoring 10% of individuals contain 2.20 times as many targets (people who upgraded) as a randomly selected list of the same size.

+
plot(m)
+

+

For regression problems, plot produces a binned +scatter-plot of \(y\) against \(\hat{y}\).

+

hist shows the histogram of out of sample (out of fold) scores, also using ggplot2:

+
hist(m)
+

+
+
+

Prediction and Scoring

+

CivisML can also be used to score models on hundreds of millions of rows, distributing the work over many compute instances. Like many estimators in R, this is done through a predict method. The newdata argument of predict can take any data source supported in civis_ml. Here we use a table in Redshift containing all Brandable users, and output the result to another table in Redshift:

+
pred_tab <- civis_table(table_name = "sample_project.brandable_all_users")
+pred_job <- predict(m, newdata = pred_tab,
+                    output_table = "sample_project.brandable_user_scores")
+

Like training and validation, scoring is distributed by default, +using up to 90 percent of your computing cluster resources. If you would +like to have more control over the number of jobs that are run at once, +you can set a maximum using n_jobs:

+
pred_job <- predict(m, newdata = pred_tab,
+                    output_table = "sample_project.brandable_user_scores",
+                    n_jobs = 25)
+

The predictions can be loaded into memory using +fetch_predictions, which downloads directly from S3:

+
yhat <- fetch_predictions(pred_job)
+

Note that if the table of predictions exceeds available memory, it +may be helpful to use download_civis instead.

+
# download from S3
+download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv")
+
+# download from Redshift
+download_civis("sample_project.brandable_user_scores")
+
+
+

Retrieving Existing models

+

An existing model (or particular run of an existing model) can be +retrieved using civis_ml_fetch_existing:

+
model_id <- m$job$id
+m <- civis_ml_fetch_existing(model_id)
+
+
+

Error Handling

+

Unfortunately, many kinds of errors can occur. When an error occurs +within CivisML, a civis_ml_error is thrown. By default, the +log from the CivisML job is printed, which is useful for debugging.

+

Here is an example error from misspelling the model type:

+
civis_ml(tab, dependent_variable = "upgrade",
+         model_type = "random_fest_classifier",
+         primary_key = "brandable_user_id",
+         excluded_columns = "residential_zip",
+         cross_validation_parameters = cv_params)
+
## <civis_ml_error>
+## scripts_get_custom_runs(id = 7077157, run_id = 58263925): 
+## 2017-08-29 13:01:54 PM CDT Queued
+## 2017-08-29 13:01:55 PM CDT Running
+## 2017-08-29 13:01:57 PM CDT Dedicating resources
+## 2017-08-29 13:01:58 PM CDT Downloading code and container
+## 2017-08-29 13:01:59 PM CDT Executing script
+## 2017-08-29 13:02:03 PM CDT Please select one of the pre-defined models: ['sparse_logistic', 'sparse_linear_regressor', 'sparse_ridge_regressor', 'gradient_boosting_classifier', 'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_regressor', 'random_forest_regressor', 'extra_trees_regressor']
+## 2017-08-29 13:02:05 PM CDT Process used approximately 97.57 MiB of its 3188 MiB memory limit
+## 2017-08-29 13:02:05 PM CDT Failed
+## 2017-08-29 13:02:06 PM CDT Error on job: Process ended with an error, exiting: 1.
+

If you don’t understand the error message, providing the error +message, job, and run ids to support is the best way to get help!

+
+
+

Programming with civis_ml

+

When programming with civis_ml, errors can be caught +using the base R try or tryCatch. In +civis, we provide functions for getting debugging +information using get_error or just the logs using +fetch_logs.

+
e <- tryCatch({
+  civis_ml(tab, dependent_variable = "upgrade",
+        model_type = "random_fest_classifier",
+        primary_key = "brandable_user_id",
+        excluded_columns = "residential_zip")
+  }, civis_ml_error = function(e) e)
+get_error(e)
+fetch_logs(e)
+

Error handling can be used to implement more robust workflow +programming with civis_ml. In the following function, we +implement retry_model, which retries on e.g. connection +failures but not on a civis_ml_error.

+
retry_model <- function(max_retries = 5) {
+  i <- 1
+  while (i < max_retries) {
+    tryCatch({
+      m <- civis_ml(tab, dependent_variable = "upgrade",
+               model_type = "random_forest_classifier",
+               primary_key = "brandable_user_id",
+               excluded_columns = "residential_zip")
+      return(m)
+    }, civis_ml_error = function(e) stop(e))
+    cat("Retry: ", i, fill = TRUE)
+    i <- i + 1
+  }
+  stop("Exceeded maximum retries.")
+}
+

Workflow programming could be further enhanced by printing the logs, +storing the error object, or writing error logs to a file or +database.
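For example, here is a minimal sketch (the local log file name is arbitrary, and write access to the working directory is assumed) that saves the CivisML log before re-raising the error:

```{r, eval = FALSE}
# Sketch: on a CivisML failure, persist the job log to a local file before
# re-raising the error. The file name is arbitrary.
result <- tryCatch({
  civis_ml(tab, dependent_variable = "upgrade",
           model_type = "random_forest_classifier",
           primary_key = "brandable_user_id",
           excluded_columns = "residential_zip")
}, civis_ml_error = function(e) {
  writeLines(as.character(fetch_logs(e)), "civis_ml_error.log")
  stop(e)
})
```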

+
+
+

Appendix

+
+

Parallelization

+

To fit many models in parallel using parallel, +foreach, or future, check out this +article or the vignette on concurrency at +browseVignettes("civis").

+
+
+

Sample weights

+

Many estimators take a sample_weight argument. This can be specified with the fit_params argument of civis_ml using list(sample_weight = 'survey_weight_column').
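A minimal sketch, assuming the training table contains a survey_weight_column:

```{r, eval = FALSE}
# Sketch: pass per-row weights to the estimator via fit_params.
# survey_weight_column is a hypothetical column in the training data.
m_wt <- civis_ml(tab, dependent_variable = "upgrade",
                 model_type = "random_forest_classifier",
                 primary_key = "brandable_user_id",
                 excluded_columns = "residential_zip",
                 fit_params = list(sample_weight = 'survey_weight_column'))
```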

+
+
+

Missing data

+

Modeling data must be complete. Any missing values will be imputed +with the mean of non-null values in a column.

+
+
+

CivisML Versions

+

By default, CivisML uses its latest version in production. If you +would like a specific version (e.g., for a production pipeline where +pinning the CivisML version is desirable), both civis_ml +and the civis_ml_* functions have the optional parameter +civisml_version that accepts a string, e.g., +'v2.3' for CivisML v2.3. Please see here +for the list of CivisML versions.
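For example, a sketch of pinning the version (using the 'v2.3' string from above; any released version string works the same way):

```{r, eval = FALSE}
# Sketch: pin the CivisML version for reproducibility in a production pipeline.
m_pinned <- civis_ml(tab, dependent_variable = "upgrade",
                     model_type = "random_forest_classifier",
                     primary_key = "brandable_user_id",
                     civisml_version = 'v2.3')
```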

+
+
+

More information

+

Custom estimators can be written in Python and included in CivisML if +they follow the scikit-learn API. For example, the +sparse_logistic, sparse_linear_regressor, and +sparse_ridge_regressor models all use the public Civis +Analytics glmnet +wrapper in Python.

+

Browse the CivisML +documentation for more details!

+
+
+ + + + + + + + + + + diff --git a/doc/civis_scripts.R b/doc/civis_scripts.R new file mode 100644 index 00000000..cb2e7cda --- /dev/null +++ b/doc/civis_scripts.R @@ -0,0 +1,199 @@ +## ----setup, include = FALSE--------------------------------------------------- +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + eval = FALSE +) + +## ----------------------------------------------------------------------------- +# # create a container script with a parameter +# script <- scripts_post_containers( +# required_resources = list(cpu = 1024, memory = 50, diskSpace = 15), +# docker_command = 'cd /package_dir && Rscript inst/run_script.R', +# docker_image_name = 'civisanalytics/datascience-r', +# name = 'SCRIPT NAME', +# params = list( +# list(name = 'NAME_OF_ENV_VAR', +# label = 'Name User Sees', +# type = 'string', +# required = TRUE) +# ) +# ) +# +# # publish the container script as a template +# template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs') +# +# # run a template script, returning file ids of run outputs +# out <- run_template(template$id) +# +# # post a file or JSONValue run output within a script +# write_job_output('filename.csv') +# json_values_post(jsonlite::toJSON(my_list), 'my_list.json') +# +# # get run output file ids of a script +# out <- fetch_output_file_ids(civis_script(id)) +# +# # get csv run outputs of a script +# df <- read_civis(civis_script(id), regex = '.csv', using = read.csv) +# +# # get JSONValue run outputs +# my_list <- read_civis(civis_script(id)) +# + +## ---- eval = FALSE------------------------------------------------------------ +# source <- c(' +# print("Hello World!") +# ') +# job <- scripts_post_r(name = 'Hello!', source = source) + +## ---- eval = FALSE------------------------------------------------------------ +# run <- scripts_post_r_runs(job$id) +# +# # check the status +# scripts_get_r_runs(job$id, run$id) +# +# # automatically poll until the job completes +# await(scripts_get_r_runs, id = job$id, run_id = run$id) + +## ----------------------------------------------------------------------------- +# run_script <- function(source, name = 'Cool') { +# job <- scripts_post_r(name = name, source = source) +# run <- scripts_post_r_runs(job$id) +# await(scripts_get_r_runs, id = job$id, run_id = run$id) +# } + +## ---- eval=FALSE-------------------------------------------------------------- +# source <- c(" +# library(civis) +# data(iris) +# write.csv(iris, 'iris.csv') +# job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID')) +# run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID')) +# file_id <- write_civis_file('iris.csv') +# scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id) +# ") +# run <- run_script(source) + +## ---- eval=FALSE-------------------------------------------------------------- +# source <- c(" +# library(civis) +# data(iris) +# write.csv(iris, 'iris.csv') +# write_job_output('iris.csv') +# ") +# run <- run_script(source) + +## ---- eval=FALSE-------------------------------------------------------------- +# source <- c(" +# library(civis) +# library(jsonlite) +# my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1)) +# json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json') +# ") +# run_farm <- run_script(source) + +## ----------------------------------------------------------------------------- +# out <- scripts_list_r_runs_outputs(run$rId, run$id) +# iris <- read_civis(out$objectId, using = read.csv) + +## ---- eval = 
FALSE------------------------------------------------------------ +# # get csv run outputs +# iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv) +# +# # get JSONValues +# my_farm <- read_civis(civis_script(run_farm$rId)) + +## ---- eval=FALSE-------------------------------------------------------------- +# # Add 'params' and 'arguments' to run_script +# run_script <- function(source, args, name = 'Cool') { +# params <- list( # params is a list of individual parameters +# list( +# name = 'PET_NAME', # name of the environment variable with the user value +# label = 'Pet Name', # name displaayed to the user +# type = 'string', # type +# required = TRUE # required? +# ) +# ) +# job <- scripts_post_r(name = name, +# source = source, +# params = params, +# arguments = args) +# run <- scripts_post_r_runs(job$id) +# await(scripts_get_r_runs, id = job$id, run_id = run$id) +# } +# +# # Access the PET_NAME variable +# source <- c(' +# library(civis) +# pet_name <- Sys.getenv("PET_NAME") +# msg <- paste0("Hello", pet_name, "!") +# print(msg) +# ') +# +# # Let's run it! Here we pass the argument 'Fitzgerald' to the +# # parameter 'PET_NAME' that we created. +# run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald')) +# + +## ---- eval=FALSE-------------------------------------------------------------- +# params <- list( +# list( +# name = 'PET_NAME', +# label = 'Pet Name', +# type = 'string', +# required = TRUE +# ) +# ) +# job <- scripts_post_r(name = 'Pet Greeter', +# source = source, +# params = params) + +## ---- eval=FALSE-------------------------------------------------------------- +# note <- c(" +# # Pet Greeter +# +# Greets your pet, given its name! +# +# For your pet to receive the greeting, it must be a Civis Platform +# user with the ability to read. +# +# Parameters: +# * Pet Name: string, Name of pet. +# +# +# Returns: +# * Nothing +# ") +# template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter') + +## ---- eval=FALSE-------------------------------------------------------------- +# job <- scripts_post_custom(id, arguments = arguments, ...) +# run <- scripts_post_custom_runs(job$id) +# await(scripts_get_custom_runs, id = job$id, run_id = run$id) + +## ---- eval = FALSE------------------------------------------------------------ +# out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES')) + +## ---- eval = FALSE------------------------------------------------------------ +# # We might need to find the project id first +# search_list(type = 'project', 'My project Name') +# out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'), +# target_project_id = project_id) + +## ----eval=FALSE--------------------------------------------------------------- +# templates_patch_scripts(template_id$id, note = new_note) + +## ---- eval = FALSE------------------------------------------------------------ +# source <- c(' +# library(civis) +# pet_name <- Sys.getenv("PET_NAME") +# msg <- paste0("Hello ", pet_name, "! 
Would you care for a sandwich?") +# print(msg) +# ') +# scripts_patch_r(id = job$id, name = 'Pet Greeter', +# source = source, +# params = params) + +## ----------------------------------------------------------------------------- +# templates_patch_scripts(template$id, archived = TRUE) + diff --git a/doc/civis_scripts.Rmd b/doc/civis_scripts.Rmd new file mode 100644 index 00000000..1f06d154 --- /dev/null +++ b/doc/civis_scripts.Rmd @@ -0,0 +1,382 @@ +--- +title: "Productionizing with Civis Scripts" +author: "Patrick Miller" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Productionizing with Civis Scripts} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r setup, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + eval = FALSE +) +``` + +Civis Scripts are the way to productionize your code with Civis Platform. +You've probably used three of the four types of scripts already in the Civis Platform UI ("Code" --> "Scripts"): +_language_ ([R](https://platform.civisanalytics.com/spa/#/scripts/new?type=r), [Python3](https://platform.civisanalytics.com/spa/#/scripts/new?type=python), [javascript](https://platform.civisanalytics.com/spa/#/scripts/new?type=javascript), and [sql](https://platform.civisanalytics.com/spa/#/scripts/new?type=sql)), [_container_](https://platform.civisanalytics.com/spa/#/scripts/new?type=container), +and [_custom_](https://platform.civisanalytics.com/spa/#/scripts/new?type=custom&fromTemplateId=11219). +If you've run any of these scripts in Civis Platform, you've already started _productionizing_ your +code. Most loosely, productionizing means that your code now runs on a +remote server instead of your local or development machine. + +You probably already know some of the benefits too: + +1. Easily schedule and automate tasks, and include tasks in workflows. +2. Ensure your code doesn't break in the future when dependencies change. +3. Share code with others without them worrying about dependencies or language compatibility. +4. Rapidly deploy fixes and changes. + +This guide will cover how to programmatically do the same tasks using the API +that you are used to doing in GUI. Instead of typing in values for the parameters or clicking to +download outputs, you can do the same thing in your programs. Hooray for automation! + +Specifically, this guide will cover how to programmatically read outputs, +kick off new script runs, and publish your own script templates +to share your code with others. It will make heavy use of API functions +directly, but highlight convenient wrappers for +common tasks where they have been implemented already. + +Ready? Buckle in! + +## Script Concepts and Overview + +A script is a job that executes code in Civis Platform. A script accepts user input through _parameters_, gives values back to the user as _run outputs_, and records any _logs_ along the way. + +A script author can share language and container scripts with others by letting users _clone_ the script. But if an author makes a change to the script such as fixing a bug or adding a feature, users will have to re-clone the script to +get access to those changes. + +A better way to share code with others is with _template_ scripts. A template script is a 'published' language or container script. The script that the template runs is the _backing script_ of the template. 
+ +Once a container or language script is published as a template, users can create +their own instances of the template. +These instances are called _custom_ scripts and they inherit all changes made to the template. +This feature makes it easy to share code with others and to rapidly deploy changes and fixes. + +## Quick Start + +```{r} +# create a container script with a parameter +script <- scripts_post_containers( + required_resources = list(cpu = 1024, memory = 50, diskSpace = 15), + docker_command = 'cd /package_dir && Rscript inst/run_script.R', + docker_image_name = 'civisanalytics/datascience-r', + name = 'SCRIPT NAME', + params = list( + list(name = 'NAME_OF_ENV_VAR', + label = 'Name User Sees', + type = 'string', + required = TRUE) + ) +) + +# publish the container script as a template +template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs') + +# run a template script, returning file ids of run outputs +out <- run_template(template$id) + +# post a file or JSONValue run output within a script +write_job_output('filename.csv') +json_values_post(jsonlite::toJSON(my_list), 'my_list.json') + +# get run output file ids of a script +out <- fetch_output_file_ids(civis_script(id)) + +# get csv run outputs of a script +df <- read_civis(civis_script(id), regex = '.csv', using = read.csv) + +# get JSONValue run outputs +my_list <- read_civis(civis_script(id)) + +``` + + + +## Creating and Running Scripts + +Let's make these concepts concrete with an example! We'll use the 'R' language script throughout, +but `container` scripts work exactly the same way. In the second section, +we'll cover `custom` and `template` scripts. + +### An Example Script + +The `post` method creates the job and returns a list of metadata about it, including its type. + +```{r, eval = FALSE} +source <- c(' + print("Hello World!") +') +job <- scripts_post_r(name = 'Hello!', source = source) +``` + +Each script can be uniquely identified by its _job id_. If you have a job id +but don't know what kind of script it is, you can do `jobs_get(id)`. + +Each script type is associated with its own API endpoints. For instance, to post a job of each script type, you need `scripts_post_r`, `scripts_post_containers`, `scripts_post_custom`, or `templates_post_scripts`. + +This job hasn't been run yet. To kick off a run do: + +```{r, eval = FALSE} +run <- scripts_post_r_runs(job$id) + +# check the status +scripts_get_r_runs(job$id, run$id) + +# automatically poll until the job completes +await(scripts_get_r_runs, id = job$id, run_id = run$id) +``` + +Since kicking off a job and polling until it completes is +a really common task for this guide, let's make it a function: + +```{r} +run_script <- function(source, name = 'Cool') { + job <- scripts_post_r(name = name, source = source) + run <- scripts_post_r_runs(job$id) + await(scripts_get_r_runs, id = job$id, run_id = run$id) +} +``` + +### Run Outputs +This script isn't very useful because it doesn't produce any output that we can access. +To add an output to a job, we can use `scripts_post_r_runs_outputs`. The two most +common types of run outputs are `Files` and `JSONValues`. + +#### Files + +We can specify adding a `File` as a run output by uploading the object to S3 +with `write_civis_file` and setting `object_type` in `scripts_post_r_runs_outputs` to `File`. +Notice that the environment variables `CIVIS_JOB_ID` and `CIVIS_RUN_ID` are +automatically inserted into the environment for us to have access to. 
+```{r, eval=FALSE} +source <- c(" + library(civis) + data(iris) + write.csv(iris, 'iris.csv') + job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID')) + run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID')) + file_id <- write_civis_file('iris.csv') + scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id) +") +run <- run_script(source) +``` + +Since this pattern is so common, we replaced it with the function `write_job_output` which +you can use to post a filename as a run output for any script type. + +```{r, eval=FALSE} +source <- c(" + library(civis) + data(iris) + write.csv(iris, 'iris.csv') + write_job_output('iris.csv') +") +run <- run_script(source) +``` + +#### JSONValues + +It is best practice to make run outputs +as portable as possible because the script can be called by any language. +For arbitrary data, JSONValues are often the best choice. +Regardless, it is user friendly to add the file extension to the name of the run output. + +Adding JSONValue run outputs is common enough +for it to be implemented directly as a Civis API endpoint, `json_values_post`: + +```{r, eval=FALSE} +source <- c(" + library(civis) + library(jsonlite) + my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1)) + json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json') +") +run_farm <- run_script(source) +``` + +To retrieve script outputs we can use `scripts_list_r_runs_outputs`: + +```{r} +out <- scripts_list_r_runs_outputs(run$rId, run$id) +iris <- read_civis(out$objectId, using = read.csv) +``` + +Since this pattern is also common, you can simply use `read_civis` directly. +This will work for any script type. Use `regex` and `using` to filter +run outputs by file extension, and provide the appropriate reading function. +JSONValues can be read automatically. + +```{r, eval = FALSE} +# get csv run outputs +iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv) + +# get JSONValues +my_farm <- read_civis(civis_script(run_farm$rId)) +``` + +### Script Parameters + +Scripts are more useful if their behavior can be configured by the user, which can be done with script parameters. +Script _parameters_ are placeholders for input by the user. Specific values of the parameters input by +the user are called _arguments_. Here, we modify `run_script` to automatically add a parameter, +and simultaneously take a value of that parameter provided by the user. In the script itself, +we can access the parameter as an environment variable. + +```{r, eval=FALSE} +# Add 'params' and 'arguments' to run_script +run_script <- function(source, args, name = 'Cool') { + params <- list( # params is a list of individual parameters + list( + name = 'PET_NAME', # name of the environment variable with the user value + label = 'Pet Name', # name displaayed to the user + type = 'string', # type + required = TRUE # required? + ) + ) + job <- scripts_post_r(name = name, + source = source, + params = params, + arguments = args) + run <- scripts_post_r_runs(job$id) + await(scripts_get_r_runs, id = job$id, run_id = run$id) +} + +# Access the PET_NAME variable +source <- c(' + library(civis) + pet_name <- Sys.getenv("PET_NAME") + msg <- paste0("Hello", pet_name, "!") + print(msg) +') + +# Let's run it! Here we pass the argument 'Fitzgerald' to the +# parameter 'PET_NAME' that we created. +run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald')) + +``` + +## Sharing Scripts with Templates + +Now we have a script. 
How can we share it with others so that they can use it? The best +way to share scripts is with `templates`. Let's start by simply posting the script above: + +```{r, eval=FALSE} +params <- list( + list( + name = 'PET_NAME', + label = 'Pet Name', + type = 'string', + required = TRUE + ) +) +job <- scripts_post_r(name = 'Pet Greeter', + source = source, + params = params) +``` + +To make this job a template use `templates_post_scripts`. Adding a notes field (markdown format) +describing what the script does, what the parameters are, and what outputs it posts is +often helpful for users. + +```{r, eval=FALSE} +note <- c(" +# Pet Greeter + +Greets your pet, given its name! + +For your pet to receive the greeting, it must be a Civis Platform +user with the ability to read. + +Parameters: + * Pet Name: string, Name of pet. + + +Returns: + * Nothing +") +template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter') +``` + +### Custom Scripts +`scripts_post_custom` creates an instance of a template that inherits all changes made to the template. +We can now make a simple program to call and run an instance of the template. + +```{r, eval=FALSE} +job <- scripts_post_custom(id, arguments = arguments, ...) +run <- scripts_post_custom_runs(job$id) +await(scripts_get_custom_runs, id = job$id, run_id = run$id) +``` + +Conveniently, `run_template` does exactly this and is already provided in `civis`. +It returns the output file ids of the job for you to use later on in your program. + +```{r, eval = FALSE} +out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES')) +``` + +To stay organized, let's automatically add the script to an existing project: + +```{r, eval = FALSE} +# We might need to find the project id first +search_list(type = 'project', 'My project Name') +out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'), + target_project_id = project_id) +``` + +### Making Changes + +To make changes to the template note or name, use `templates_patch_scripts`. + +```{r,eval=FALSE} +templates_patch_scripts(template_id$id, note = new_note) +``` + +To change the behavior, name, or parameters of the script, +update the backing script using `scripts_patch_r`. + +* Note: +It is _not recommended_ to make breaking changes to the API of a script by +adding a required parameter, changing a parameter default, or removing a run output. This will break workflows +of your users. Instead of making breaking changes, release a new version of the script. + +```{r, eval = FALSE} +source <- c(' + library(civis) + pet_name <- Sys.getenv("PET_NAME") + msg <- paste0("Hello ", pet_name, "! Would you care for a sandwich?") + print(msg) +') +scripts_patch_r(id = job$id, name = 'Pet Greeter', + source = source, + params = params) +``` + +### Discoverability + +To help share your template with others, use this link: +`https://platform.civisanalytics.com/spa/#/scripts/new/{your template id}`. + +This link will automatically direct the user to a new instance of the template. + +It's a good idea to archive unused templates so that it's easy for users to find +the right template quickly. This is important if you automatically deploy your templates. + +Let's clean up our experiment by archiving our Pet Greeter Template: + +```{r} +templates_patch_scripts(template$id, archived = TRUE) +``` + +## Conclusion + +That's it! Now go forth and productionize! 
+ + + + diff --git a/doc/civis_scripts.html b/doc/civis_scripts.html new file mode 100644 index 00000000..f07e76a6 --- /dev/null +++ b/doc/civis_scripts.html @@ -0,0 +1,697 @@ + + + + + + + + + + + + + + + + +Productionizing with Civis Scripts + + + + + + + + + + + + + + + + + + + + + + + + + + +

Productionizing with Civis Scripts

+

Patrick Miller

+

2023-02-13

+ + + +

Civis Scripts are the way to productionize your code with Civis +Platform. You’ve probably used three of the four types of scripts +already in the Civis Platform UI (“Code” –> “Scripts”): +language (R, +Python3, +javascript, +and sql), +container, +and custom. +If you’ve run any of these scripts in Civis Platform, you’ve already +started productionizing your code. Most loosely, +productionizing means that your code now runs on a remote server instead +of your local or development machine.

+

You probably already know some of the benefits too:

+
    +
  1. Easily schedule and automate tasks, and include tasks in workflows.
  2. Ensure your code doesn’t break in the future when dependencies change.
  3. Share code with others without them worrying about dependencies or language compatibility.
  4. Rapidly deploy fixes and changes.
+

This guide will cover how to do programmatically, through the API, the same tasks you are used to doing in the GUI. Instead of typing in values for the parameters or clicking to download outputs, you can do the same thing in your programs. Hooray for automation!

+

Specifically, this guide will cover how to programmatically read +outputs, kick off new script runs, and publish your own script templates +to share your code with others. It will make heavy use of API functions +directly, but highlight convenient wrappers for common tasks where they +have been implemented already.

+

Ready? Buckle in!

+
+

Script Concepts and Overview

+

A script is a job that executes code in Civis Platform. A script +accepts user input through parameters, gives values back to the +user as run outputs, and records any logs along the +way.

+

A script author can share language and container scripts with others +by letting users clone the script. But if an author makes a +change to the script such as fixing a bug or adding a feature, users +will have to re-clone the script to get access to those changes.

+

A better way to share code with others is with template +scripts. A template script is a ‘published’ language or container +script. The script that the template runs is the backing script +of the template.

+

Once a container or language script is published as a template, users +can create their own instances of the template. These instances are +called custom scripts and they inherit all changes made to the +template. This feature makes it easy to share code with others and to +rapidly deploy changes and fixes.

+
+
+

Quick Start

+
# create a container script with a parameter
+script <- scripts_post_containers(
+  required_resources = list(cpu = 1024, memory = 50, diskSpace = 15),
+  docker_command = 'cd /package_dir && Rscript inst/run_script.R',
+  docker_image_name = 'civisanalytics/datascience-r',
+  name = 'SCRIPT NAME',
+  params = list(
+    list(name = 'NAME_OF_ENV_VAR',
+         label = 'Name User Sees', 
+         type = 'string',
+         required = TRUE)
+  )
+)
+
+# publish the container script as a template 
+template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs')
+
+# run a template script, returning file ids of run outputs
+out <- run_template(template$id)
+
+# post a file or JSONValue run output within a script
+write_job_output('filename.csv')
+json_values_post(jsonlite::toJSON(my_list), 'my_list.json')
+
+# get run output file ids of a script
+out <- fetch_output_file_ids(civis_script(id))
+
+# get csv run outputs of a script
+df <- read_civis(civis_script(id), regex = '.csv', using = read.csv)
+
+# get JSONValue run outputs
+my_list <- read_civis(civis_script(id))
+
+
+

Creating and Running Scripts

+

Let’s make these concepts concrete with an example! We’ll use the ‘R’ +language script throughout, but container scripts work +exactly the same way. In the second section, we’ll cover +custom and template scripts.

+
+

An Example Script

+

The post method creates the job and returns a list of +metadata about it, including its type.

+
source <- c('
+ print("Hello World!")
+')
+job <- scripts_post_r(name = 'Hello!', source = source)
+

Each script can be uniquely identified by its job id. If you +have a job id but don’t know what kind of script it is, you can do +jobs_get(id).
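A short sketch; the exact shape of the returned metadata is an assumption here, but it includes the job's type:

```{r, eval = FALSE}
# Sketch: look up a job of unknown kind by id. The `type` element is an
# assumption about the shape of the returned metadata.
meta <- jobs_get(job$id)
meta$type
```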

+

Each script type is associated with its own API endpoints. For +instance, to post a job of each script type, you need +scripts_post_r, scripts_post_containers, +scripts_post_custom, or +templates_post_scripts.

+

This job hasn’t been run yet. To kick off a run do:

+
run <- scripts_post_r_runs(job$id)
+
+# check the status
+scripts_get_r_runs(job$id, run$id)
+
+# automatically poll until the job completes
+await(scripts_get_r_runs, id = job$id, run_id = run$id)
+

Since kicking off a job and polling until it completes is a really +common task for this guide, let’s make it a function:

+
run_script <- function(source, name = 'Cool') {
+  job <- scripts_post_r(name = name, source = source)
+  run <- scripts_post_r_runs(job$id)
+  await(scripts_get_r_runs, id = job$id, run_id = run$id)
+}
+
+
+

Run Outputs

+

This script isn’t very useful because it doesn’t produce any output +that we can access. To add an output to a job, we can use +scripts_post_r_runs_outputs. The two most common types of +run outputs are Files and JSONValues.

+
+

Files

+

We can specify adding a File as a run output by +uploading the object to S3 with write_civis_file and +setting object_type in +scripts_post_r_runs_outputs to File. Notice +that the environment variables CIVIS_JOB_ID and +CIVIS_RUN_ID are automatically inserted into the +environment for us to have access to.

+
source <- c("
+ library(civis)
+ data(iris)
+ write.csv(iris, 'iris.csv')
+ job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID'))
+ run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID'))
+ file_id <- write_civis_file('iris.csv')
+ scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id)
+")
+run <- run_script(source)
+

Since this pattern is so common, we replaced it with the function +write_job_output which you can use to post a filename as a +run output for any script type.

+
source <- c("
+ library(civis)
+ data(iris)
+ write.csv(iris, 'iris.csv')
+ write_job_output('iris.csv')
+")
+run <- run_script(source)
+
+
+

JSONValues

+

It is best practice to make run outputs as portable as possible +because the script can be called by any language. For arbitrary data, +JSONValues are often the best choice. Regardless, it is user friendly to +add the file extension to the name of the run output.

+

Adding JSONValue run outputs is common enough for it to be +implemented directly as a Civis API endpoint, +json_values_post:

+
source <- c("
+ library(civis)
+ library(jsonlite)
+ my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1))
+ json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json')
+")
+run_farm <- run_script(source)
+

To retrieve script outputs we can use +scripts_list_r_runs_outputs:

+
out <- scripts_list_r_runs_outputs(run$rId, run$id)
+iris <- read_civis(out$objectId, using = read.csv)
+

Since this pattern is also common, you can simply use +read_civis directly. This will work for any script type. +Use regex and using to filter run outputs by +file extension, and provide the appropriate reading function. JSONValues +can be read automatically.

+
# get csv run outputs
+iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv)
+
+# get JSONValues
+my_farm <- read_civis(civis_script(run_farm$rId))
+
+
+
+

Script Parameters

+

Scripts are more useful if their behavior can be configured by the +user, which can be done with script parameters. Script +parameters are placeholders for input by the user. Specific +values of the parameters input by the user are called +arguments. Here, we modify run_script to +automatically add a parameter, and simultaneously take a value of that +parameter provided by the user. In the script itself, we can access the +parameter as an environment variable.

+
# Add 'params' and 'arguments' to run_script
+run_script <- function(source, args, name = 'Cool') {
+  params <- list(          # params is a list of individual parameters
+    list(
+      name = 'PET_NAME',   # name of the environment variable with the user value
+      label = 'Pet Name',  # name displayed to the user
+      type = 'string',     # type 
+      required = TRUE      # required?
+    )
+  )
+  job <- scripts_post_r(name = name, 
+                        source = source, 
+                        params = params, 
+                        arguments = args)
+  run <- scripts_post_r_runs(job$id)
+  await(scripts_get_r_runs, id = job$id, run_id = run$id)
+}
+
+# Access the PET_NAME variable
+source <- c('
+  library(civis)
+  pet_name <- Sys.getenv("PET_NAME")
+  msg <- paste0("Hello", pet_name, "!")
+  print(msg)
+')
+
+# Let's run it! Here we pass the argument 'Fitzgerald' to the 
+# parameter 'PET_NAME' that we created.
+run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald'))
+
+
+
+

Sharing Scripts with Templates

+

Now we have a script. How can we share it with others so that they +can use it? The best way to share scripts is with +templates. Let’s start by simply posting the script +above:

+
params <- list(          
+  list(
+    name = 'PET_NAME',   
+    label = 'Pet Name',  
+    type = 'string',     
+    required = TRUE      
+  )
+)
+job <- scripts_post_r(name = 'Pet Greeter', 
+                      source = source, 
+                      params = params)
+

To make this job a template use templates_post_scripts. +Adding a notes field (markdown format) describing what the script does, +what the parameters are, and what outputs it posts is often helpful for +users.

+
note <- c("
+# Pet Greeter
+
+Greets your pet, given its name! 
+ 
+For your pet to receive the greeting, it must be a Civis Platform
+user with the ability to read.
+ 
+Parameters:
+  * Pet Name: string, Name of pet.
+
+  
+Returns:
+  * Nothing
+")
+template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter')
+
+

Custom Scripts

+

scripts_post_custom creates an instance of a template +that inherits all changes made to the template. We can now make a simple +program to call and run an instance of the template.

+
job <- scripts_post_custom(id, arguments = arguments, ...)
+run <- scripts_post_custom_runs(job$id)
+await(scripts_get_custom_runs, id = job$id, run_id = run$id)
+

Conveniently, run_template does exactly this and is +already provided in civis. It returns the output file ids +of the job for you to use later on in your program.

+
out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'))
+

To stay organized, let’s automatically add the script to an existing +project:

+
# We might need to find the project id first
+search_list(type = 'project', 'My project Name')
+out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'),
+                    target_project_id = project_id)
+
+
+

Making Changes

+

To make changes to the template note or name, use +templates_patch_scripts.

+
templates_patch_scripts(template_id$id, note = new_note)
+

To change the behavior, name, or parameters of the script, update the +backing script using scripts_patch_r.

+
    +
  • Note: It is not recommended to make breaking changes to the +API of a script by adding a required parameter, changing a parameter +default, or removing a run output. This will break workflows of your +users. Instead of making breaking changes, release a new version of the +script.
+
source <- c('
+  library(civis)
+  pet_name <- Sys.getenv("PET_NAME")
+  msg <- paste0("Hello ", pet_name, "! Would you care for a sandwich?")
+  print(msg)
+')
+scripts_patch_r(id = job$id, name = 'Pet Greeter',
+                source = source,
+                params = params)
+
+
+

Discoverability

+

To help share your template with others, use this link: +https://platform.civisanalytics.com/spa/#/scripts/new/{your template id}.

+

This link will automatically direct the user to a new instance of the +template.

+

It’s a good idea to archive unused templates so that it’s easy for +users to find the right template quickly. This is important if you +automatically deploy your templates.

+

Let’s clean up our experiment by archiving our Pet Greeter +Template:

+
templates_patch_scripts(template$id, archived = TRUE)
+
+
+
+

Conclusion

+

That’s it! Now go forth and productionize!

+
+ + + + + + + + + + + diff --git a/doc/concurrency.R b/doc/concurrency.R new file mode 100644 index 00000000..5752cd0c --- /dev/null +++ b/doc/concurrency.R @@ -0,0 +1,86 @@ +## ---- eval=FALSE-------------------------------------------------------------- +# nap <- function(seconds) { +# Sys.sleep(seconds) +# } +# +# start <- Sys.time() +# nap(1) +# nap(2) +# nap(3) +# end <- Sys.time() +# print(end - start) + +## ---- eval=FALSE-------------------------------------------------------------- +# library(future) +# library(civis) +# +# # Define a concurrent backend with enough processes so each function +# # we want to run concurrently has its own process. Here we'll need at least 2. +# plan("multiprocess", workers=10) +# +# # Load data +# data(iris) +# data(airquality) +# airquality <- airquality[!is.na(airquality$Ozone),] # remove missing in dv +# +# # Create a future for each model, using the special %<-% assignment operator. +# # These futures are created immediately, kicking off the models. +# air_model %<-% civis_ml(airquality, "Ozone", "gradient_boosting_regressor") +# iris_model %<-% civis_ml(iris, "Species", "sparse_logistic") +# +# # At this point, `air_model` has not finished training yet. That's okay, +# # the program will just wait until `air_model` is done before printing it. +# print("airquality R^2:") +# print(air_model$metrics$metrics$r_squared) +# print("iris ROC:") +# print(iris_model$metrics$metrics$roc_auc) + +## ---- eval=FALSE-------------------------------------------------------------- +# library(parallel) +# library(doParallel) +# library(foreach) +# library(civis) +# +# # Register a local cluster with enough processes so each function +# # we want to run concurrently has its own process. Here we'll +# # need at least 3, with 1 for each model_type in model_types. +# cluster <- makeCluster(10) +# registerDoParallel(cluster) +# +# # Model types to build +# model_types <- c("sparse_logistic", +# "gradient_boosting_classifier", +# "random_forest_classifier") +# +# # Load data +# data(iris) +# +# # Listen for multiple models to complete concurrently +# model_results <- foreach(model_type=iter(model_types), .packages='civis') %dopar% { +# civis_ml(iris, "Species", model_type) +# } +# stopCluster(cluster) +# print("ROC Results") +# lapply(model_results, function(result) result$metrics$metrics$roc_auc) + +## ---- eval=FALSE-------------------------------------------------------------- +# library(civis) +# library(parallel) +# +# # Model types to build +# model_types <- c("sparse_logistic", +# "gradient_boosting_classifier", +# "random_forest_classifier") +# +# # Load data +# data(iris) +# +# # Loop over all models in parallel with a max of 10 processes +# model_results <- mclapply(model_types, function(model_type) { +# civis_ml(iris, "Species", model_type) +# }, mc.cores=10) +# +# # Wait for all models simultaneously +# print("ROC Results") +# lapply(model_results, function(result) result$metrics$metrics$roc_auc) + diff --git a/doc/concurrency.Rmd b/doc/concurrency.Rmd new file mode 100644 index 00000000..fbfbb152 --- /dev/null +++ b/doc/concurrency.Rmd @@ -0,0 +1,157 @@ +--- +title: "Making Simultaneous Calls to Platform" +date: "2017-08-14" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Asychronous Programming} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +## Concurrency in the Civis R Client + +Just like most functions in R, all functions in `civis` block. 
This means +that each function in a program must complete before the next function runs. +For instance, + +```{r, eval=FALSE} +nap <- function(seconds) { + Sys.sleep(seconds) +} + +start <- Sys.time() +nap(1) +nap(2) +nap(3) +end <- Sys.time() +print(end - start) +``` + +This program takes 6 seconds to complete, since it takes 1 second for the +first `nap`, 2 for the second and 3 for the last. This program is easy to +reason about because each function is sequentially executed. Usually, that +is how we want our programs to run. + +There are some exceptions to this rule. Sequentially executing each function +might be inconvenient if each `nap` took 30 minutes instead of a few seconds. +In that case, we might like our program to perform all 3 naps simultaneously. +In the above example, running all 3 naps simultaneously would take 3 seconds +(the length of the longest nap) rather than 6 seconds. + +As all function calls in `civis` block, `civis` relies on the mature +R ecosystem for parallel programming to enable multiple simultaneous +tasks. The three packages we introduce are `future`, `foreach`, and +`parallel` (included in base R). For all packages, simultaneous tasks are +enabled by starting each task in a separate R process. Examples for building +several models in parallel with different libraries are included below. The +libraries have strengths and weaknesses and choosing which library to use is +often a matter of preference. + +It is important to note that when calling `civis` functions, the computation +required to complete the task takes place in Platform. For instance, during +a call to `civis_ml`, Platform builds the model while your laptop waits +for the task to complete. This means that you don't have to worry about +running out of memory or cpu cores on your laptop when training dozens of +models, or when scoring a model on a very large population. The task being +parallelized in the code below is simply the task of waiting for Platform to +send results back to your laptop. + + +## Building Many Models with `future` + +```{r, eval=FALSE} +library(future) +library(civis) + +# Define a concurrent backend with enough processes so each function +# we want to run concurrently has its own process. Here we'll need at least 2. +plan("multiprocess", workers=10) + +# Load data +data(iris) +data(airquality) +airquality <- airquality[!is.na(airquality$Ozone),] # remove missing in dv + +# Create a future for each model, using the special %<-% assignment operator. +# These futures are created immediately, kicking off the models. +air_model %<-% civis_ml(airquality, "Ozone", "gradient_boosting_regressor") +iris_model %<-% civis_ml(iris, "Species", "sparse_logistic") + +# At this point, `air_model` has not finished training yet. That's okay, +# the program will just wait until `air_model` is done before printing it. +print("airquality R^2:") +print(air_model$metrics$metrics$r_squared) +print("iris ROC:") +print(iris_model$metrics$metrics$roc_auc) +``` + + +## Building Many Models with `foreach` + +```{r, eval=FALSE} +library(parallel) +library(doParallel) +library(foreach) +library(civis) + +# Register a local cluster with enough processes so each function +# we want to run concurrently has its own process. Here we'll +# need at least 3, with 1 for each model_type in model_types. 
+cluster <- makeCluster(10) +registerDoParallel(cluster) + +# Model types to build +model_types <- c("sparse_logistic", + "gradient_boosting_classifier", + "random_forest_classifier") + +# Load data +data(iris) + +# Listen for multiple models to complete concurrently +model_results <- foreach(model_type=iter(model_types), .packages='civis') %dopar% { + civis_ml(iris, "Species", model_type) +} +stopCluster(cluster) +print("ROC Results") +lapply(model_results, function(result) result$metrics$metrics$roc_auc) +``` + +## Building Many Models with `mcparallel` + +Note: `mcparallel` relies on forking and thus is not available on Windows. + +```{r, eval=FALSE} +library(civis) +library(parallel) + +# Model types to build +model_types <- c("sparse_logistic", + "gradient_boosting_classifier", + "random_forest_classifier") + +# Load data +data(iris) + +# Loop over all models in parallel with a max of 10 processes +model_results <- mclapply(model_types, function(model_type) { + civis_ml(iris, "Species", model_type) +}, mc.cores=10) + +# Wait for all models simultaneously +print("ROC Results") +lapply(model_results, function(result) result$metrics$metrics$roc_auc) +``` + +## Operating System / Environment Specific Errors + +Differences in operating systems and R environments may cause errors for +some users of the parallel libraries listed above. In particular, +`mclapply` does not work on Windows and may not work in RStudio on +certain operating systems. `future` may require `plan(multisession)` on +certain operating systems. If you encounter an error parallelizing +functions in `civis`, we recommend first trying more than one method +listed above. While we will address errors specific to `civis` with +regards to parallel code, the technicalities of parallel libraries in +R across operating systems and environments prevent us from providing +more general support for issues regarding parallelized code in R. diff --git a/doc/concurrency.html b/doc/concurrency.html new file mode 100644 index 00000000..d1c192d8 --- /dev/null +++ b/doc/concurrency.html @@ -0,0 +1,499 @@ + + + + + + + + + + + + + + + +Making Simultaneous Calls to Platform + + + + + + + + + + + + + + + + + + + + + + + + + + +

Making Simultaneous Calls to Platform

+

2017-08-14

+ + + +
+

Concurrency in the Civis R Client

+

Just like most functions in R, all functions in civis +block. This means that each function in a program must complete before +the next function runs. For instance,

+
nap <- function(seconds) {
+    Sys.sleep(seconds)
+}
+
+start <- Sys.time()
+nap(1)
+nap(2)
+nap(3)
+end <- Sys.time()
+print(end - start)
+

This program takes 6 seconds to complete, since it takes 1 second for +the first nap, 2 for the second and 3 for the last. This +program is easy to reason about because each function is sequentially +executed. Usually, that is how we want our programs to run.

+

There are some exceptions to this rule. Sequentially executing each +function might be inconvenient if each nap took 30 minutes +instead of a few seconds. In that case, we might like our program to +perform all 3 naps simultaneously. In the above example, running all 3 +naps simultaneously would take 3 seconds (the length of the longest nap) +rather than 6 seconds.

+
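To make that concrete, here is a minimal sketch of the nap example run concurrently with the `future` package (introduced below). It assumes a multisession backend is available and ignores the small overhead of starting the worker processes.

```{r, eval=FALSE}
library(future)
plan(multisession, workers = 3)  # one R process per nap

nap <- function(seconds) {
  Sys.sleep(seconds)
}

start <- Sys.time()
# Each nap starts immediately in its own process.
nap1 %<-% nap(1)
nap2 %<-% nap(2)
nap3 %<-% nap(3)
# Touching the values waits for all three naps to finish.
invisible(list(nap1, nap2, nap3))
end <- Sys.time()
print(end - start)  # roughly 3 seconds, the length of the longest nap
```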

As all function calls in civis block, civis +relies on the mature R ecosystem for parallel programming to enable +multiple simultaneous tasks. The three packages we introduce are +future, foreach, and parallel +(included in base R). For all packages, simultaneous tasks are enabled +by starting each task in a separate R process. Examples for building +several models in parallel with different libraries are included below. +The libraries have strengths and weaknesses and choosing which library +to use is often a matter of preference.

+

It is important to note that when calling civis +functions, the computation required to complete the task takes place in +Platform. For instance, during a call to civis_ml, Platform +builds the model while your laptop waits for the task to complete. This +means that you don’t have to worry about running out of memory or cpu +cores on your laptop when training dozens of models, or when scoring a +model on a very large population. The task being parallelized in the +code below is simply the task of waiting for Platform to send results +back to your laptop.

+
+
+

Building Many Models with future

+
library(future)
+library(civis)
+
+# Define a concurrent backend with enough processes so each function
+# we want to run concurrently has its own process. Here we'll need at least 2.
+plan("multiprocess", workers=10)
+
+# Load data
+data(iris)
+data(airquality)
+airquality <- airquality[!is.na(airquality$Ozone),]  # remove missing in dv
+
+# Create a future for each model, using the special %<-% assignment operator.
+# These futures are created immediately, kicking off the models.
+air_model %<-% civis_ml(airquality, "Ozone", "gradient_boosting_regressor")
+iris_model %<-% civis_ml(iris, "Species", "sparse_logistic")
+
+# At this point, `air_model` has not finished training yet. That's okay,
+# the program will just wait until `air_model` is done before printing it.
+print("airquality R^2:")
+print(air_model$metrics$metrics$r_squared)
+print("iris ROC:")
+print(iris_model$metrics$metrics$roc_auc)
+
+
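The example above uses the `%<-%` operator for a fixed number of models. If the `future.apply` package is also installed (an assumption; it is not used elsewhere in this vignette), the same many-models pattern can be written as a single call. This is a sketch, not part of the original vignette:

```{r, eval=FALSE}
library(future.apply)
library(civis)
plan(multisession, workers = 3)

# Model types to build
model_types <- c("sparse_logistic",
                 "gradient_boosting_classifier",
                 "random_forest_classifier")

# Load data
data(iris)

# One future per model type; future_lapply blocks until every model is done.
model_results <- future_lapply(model_types, function(model_type) {
  civis_ml(iris, "Species", model_type)
})

print("ROC Results")
lapply(model_results, function(result) result$metrics$metrics$roc_auc)
```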
+

Building Many Models with foreach

+
library(parallel)
+library(doParallel)
+library(foreach)
+library(civis)
+
+# Register a local cluster with enough processes so each function
+# we want to run concurrently has its own process. Here we'll
+# need at least 3, with 1 for each model_type in model_types.
+cluster <- makeCluster(10)
+registerDoParallel(cluster)
+
+# Model types to build
+model_types <- c("sparse_logistic",
+                 "gradient_boosting_classifier",
+                 "random_forest_classifier")
+
+# Load data
+data(iris)
+
+# Listen for multiple models to complete concurrently
+model_results <- foreach(model_type=iter(model_types), .packages='civis') %dopar% {
+    civis_ml(iris, "Species", model_type)
+}
+stopCluster(cluster)
+print("ROC Results")
+lapply(model_results, function(result) result$metrics$metrics$roc_auc)
+
+
+

Building Many Models with mcparallel

+

Note: mcparallel relies on forking and thus is not +available on Windows.

+
library(civis)
+library(parallel)
+
+# Model types to build
+model_types <- c("sparse_logistic",
+                 "gradient_boosting_classifier",
+                 "random_forest_classifier")
+
+# Load data
+data(iris)
+
+# Loop over all models in parallel with a max of 10 processes
+model_results <- mclapply(model_types, function(model_type) {
+  civis_ml(iris, "Species", model_type)
+}, mc.cores=10)
+
+# Wait for all models simultaneously
+print("ROC Results")
+lapply(model_results, function(result) result$metrics$metrics$roc_auc)
+
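Since forking is unavailable on Windows, a small guard can fall back to a single process there. This is a sketch under that assumption; with `mc.cores = 1`, `mclapply` simply runs sequentially.

```{r, eval=FALSE}
library(parallel)
library(civis)

# Model types to build
model_types <- c("sparse_logistic",
                 "gradient_boosting_classifier",
                 "random_forest_classifier")

# Load data
data(iris)

# mclapply only supports mc.cores = 1 on Windows, which runs sequentially.
n_processes <- if (.Platform$OS.type == "windows") 1L else 10L

model_results <- mclapply(model_types, function(model_type) {
  civis_ml(iris, "Species", model_type)
}, mc.cores = n_processes)
```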
+
+

Operating System / Environment Specific Errors

+

Differences in operating systems and R environments may cause errors +for some users of the parallel libraries listed above. In particular, +mclapply does not work on Windows and may not work in +RStudio on certain operating systems. future may require +plan(multisession) on certain operating systems. If you +encounter an error parallelizing functions in civis, we +recommend first trying more than one method listed above. While we will +address errors specific to civis with regards to parallel +code, the technicalities of parallel libraries in R across operating +systems and environments prevent us from providing more general support +for issues regarding parallelized code in R.

+
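For example, if `plan("multiprocess", ...)` warns or errors on your system (it has been deprecated in newer releases of the `future` package), the usual fix is simply to switch backends. A minimal sketch:

```{r, eval=FALSE}
library(future)
# multisession starts separate R sessions and works on every operating
# system, including Windows and inside RStudio.
plan(multisession, workers = 10)
```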
+ + + + + + + + + + + diff --git a/doc/data_import_and_export.R b/doc/data_import_and_export.R new file mode 100644 index 00000000..62029d9d --- /dev/null +++ b/doc/data_import_and_export.R @@ -0,0 +1,81 @@ +## ----eval=FALSE--------------------------------------------------------------- +# df <- read_civis("schema.tablename", database = "my-database") + +## ----eval=FALSE--------------------------------------------------------------- +# options(civis.default_db = "my-database") +# df <- read_civis("schema.tablename") + +## ----eval=FALSE--------------------------------------------------------------- +# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +# df <- read_civis(sql(query)) + +## ---- eval=FALSE-------------------------------------------------------------- +# data(iris) +# id <- write_civis_file(iris) +# df <- read_civis(id) + +## ----eval=FALSE--------------------------------------------------------------- +# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +# df <- read_civis(sql(query), colClasses = "character") +# df2 <- read_civis(sql(query), as.is = TRUE) + +## ----eval=FALSE--------------------------------------------------------------- +# options(civis.default_db = "my_database") +# df <- data.frame(x = rnorm(100), y = rnorm(100), z = rnorm(100)) +# write_civis(df, tablename = "schema.tablename", +# distkey = "id", sortkey1 = "date", sortkey2 = "type") + +## ----eval=FALSE--------------------------------------------------------------- +# write_civis(df, tablename = "schema.tablename", if_exists = "append") +# write_civis(df, tablename = "schema.tablename", if_exists = "truncate") + +## ---- eval=FALSE-------------------------------------------------------------- +# write_civis("~/path/to/my_data.csv", tablename="schema.tablename") + +## ---- eval = FALSE------------------------------------------------------------ +# # Upload a data frame +# data(iris) +# id <- write_civis_file(iris) +# iris2 <- read_civis(id) +# +# # Upload an arbitrary R object +# farm <- list(chickens = 1, ducks = 4, pigs = 2, cows = 1) +# id <- write_civis_file(farm) +# farm2 <- read_civis(id, using = readRDS) +# + +## ---- eval = FALSE------------------------------------------------------------ +# id <- write_civis_file("path/to/my_data.json") +# read_civis(id, using = jsonlite::fromJSON) + +## ----eval=FALSE--------------------------------------------------------------- +# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +# download_civis(sql(query), file = "path/to/my_file.csv") +# download_civis("schema.tablename", file = "path/to/my_file.csv") +# +# id <- write_civis_file(iris) +# download_civis(id, file = "path/to/my_iris.rds") + +## ----eval=FALSE--------------------------------------------------------------- +# q_res <- query_civis("GRANT ALL ON schema.my_table TO GROUP admin") + +## ---- eval = FALSE------------------------------------------------------------ +# id <- q_res$id +# query_civis(id) + +## ---- eval=FALSE-------------------------------------------------------------- +# Error in api_key() : +# The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') + +## ---- eval=FALSE-------------------------------------------------------------- +# read_civis(sql("SELECT * FROM schema.tablename WHERE 1 = 0")) +# Error in download_script_results(run$script_id, run$run_id) : +# Query produced no output. 
+ +## ---- eval=FALSE-------------------------------------------------------------- +# Error in get_db(database) : +# Argument database is NULL and options("civis.default_db") not set. Set this option using options(civis.default_db = "my_database") + +## ---- eval=FALSE-------------------------------------------------------------- +# sapply(databases_list(), function(x) x$name) + diff --git a/doc/data_import_and_export.Rmd b/doc/data_import_and_export.Rmd new file mode 100644 index 00000000..5d364e76 --- /dev/null +++ b/doc/data_import_and_export.Rmd @@ -0,0 +1,240 @@ +--- +title: "Data Import and Export" +author: "Patrick Miller, Keith Ingersoll" +date: "2017-08-14" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Civis IO} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +## Passing Data Back and Forth + +Often the simplest, but most useful operation when working with the +Platform is to move data in and out. From the perspective of the R +client, we call moving data from the Platform to the local +machine *reading*. Likewise, moving data from the local +machine to the Platform is called *writing*. + +The `civis` client handles data imports and exports in two basic ways: + +1. Moving data directly between the R workspace and the Platform (the most common use case). +2. Moving data between the Platform and local csv files (this is useful for large data that doesn't fit into memory). + +Data can be stored on Platform in two places: + +1. Amazon Redshift, a SQL database. +2. Amazon S3, also referred to as the 'files' endpoint. + +Tables in Redshift are accessed and modified using SQL queries. Tables in Redshift +can be easily shared and used in multiple workflows by multiple people. +However, importing and exporting even small files on Redshift can be slow. + +R objects and arbitrary files can be stored on Amazon S3, and are accessed +using a numeric file id. Data frames are uploaded as CSVs for portability, +and arbitrary R objects are serialized using `saveRDS` for speed and efficiency. + +## Reading Data Into R From Platform + +The main workhorse for getting data from Platform is `read_civis`. +This function is designed to work similarly to the built in function +`read.csv`, returning a dataframe from a table in Platform. For more flexibility, +`read_civis` can download files from Redshift using an SQL query, or download a +file from S3 ('the files endpoint') using a file id. + +To read from a table in Platform, simply provide the name of the schema, +table within the schema, and the database: + +```{r eval=FALSE} +df <- read_civis("schema.tablename", database = "my-database") +``` + +For convenience, a default database can be set in the package options, and not specified +in further calls to any IO function. If there is only one database available, this +database will automatically be used as the default. +In the examples that follow, we assume that a default database has been set. + +```{r eval=FALSE} +options(civis.default_db = "my-database") +df <- read_civis("schema.tablename") +``` + +`read_civis` accepts SQL queries when more flexibility is needed. This is accomplished +by wrapping `sql(...)` around a string containing the query. With `read_civis`, +queries are always read only, and always return a `data.frame`. 
+ +```{r eval=FALSE} +query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +df <- read_civis(sql(query)) +``` + +Finally, `read_civis` accepts a file id as the first argument to read in files +from S3 as data frames. IDs are obtained from `write_civis_file`. + +```{r, eval=FALSE} +data(iris) +id <- write_civis_file(iris) +df <- read_civis(id) +``` + +For maximum flexibility, `read_civis` accepts parameters from `read.csv` which +can be used to define data types when the defaults are not appropriate. +For instance, when numbers should be read in as characters or when strings +shouldn't be read in as factors. + +```{r eval=FALSE} +query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +df <- read_civis(sql(query), colClasses = "character") +df2 <- read_civis(sql(query), as.is = TRUE) +``` + + +## Uploading Data to a Database + +The complement to reading data into the R workspace is writing data +to the Platform. The function `write_civis` uploads data frames or csv files to +an Amazon Redshift database. The function `write_civis_file` uploads R objects and +arbitrary files to Amazon S3 (the files endpoint). + +When creating a new table, `write_civis` relies on Platform to determine +data types. Distkeys and sortkeys can optionally be set to improve query performance. +Again, we set a default database in these examples for convenience. + +```{r eval=FALSE} +options(civis.default_db = "my_database") +df <- data.frame(x = rnorm(100), y = rnorm(100), z = rnorm(100)) +write_civis(df, tablename = "schema.tablename", + distkey = "id", sortkey1 = "date", sortkey2 = "type") +``` + +By default, `write_civis` will fail if the table passed in `tablename` +already exists. Optionally, `write_civis` can append to an existing +table. It may also delete all rows and then append (truncate). If +specific datatypes are required, a table may first be created with a +SQL `CREATE TABLE` command and then data can be inserted with +`write_civis`. + +```{r eval=FALSE} +write_civis(df, tablename = "schema.tablename", if_exists = "append") +write_civis(df, tablename = "schema.tablename", if_exists = "truncate") +``` + +If a csv file is saved to disk but not loaded in the R workspace, +`write_civis` will upload the csv to Platform without needing +first load the csv into RAM. This can save time when a file is large. +Uploading a csv directly to Platform is done by simply passing the file name +and path to `write_civis` as the first argument: + +```{r, eval=FALSE} +write_civis("~/path/to/my_data.csv", tablename="schema.tablename") +``` + +## Uploading Data to S3 + +Finally, `write_civis_file` uploads data frames, R objects and files to Amazon S3, which is also +referred to as the 'files endpoint.' Data frames are uploaded as CSVs. +R objects saved to the files endpoint and are serialized using `saveRDS`. + +Data frames and R objects can be loaded back into memory by passing the +file id to `read_civis`, and an appropriate `using` argument. + +```{r, eval = FALSE} +# Upload a data frame +data(iris) +id <- write_civis_file(iris) +iris2 <- read_civis(id) + +# Upload an arbitrary R object +farm <- list(chickens = 1, ducks = 4, pigs = 2, cows = 1) +id <- write_civis_file(farm) +farm2 <- read_civis(id, using = readRDS) + +``` + +When passed a file name and path, `write_civis_file` will upload the file to S3 as-is. +To read the file back into memory, an appropriate function to convert the +file to a data frame must be provided to the `using` argument of `read_civis`. 
+For example, a JSON file can be read back into R using `jsonlite::fromJSON`. + +```{r, eval = FALSE} +id <- write_civis_file("path/to/my_data.json") +read_civis(id, using = jsonlite::fromJSON) +``` + + +## Downloading Large Data Sets from Platform. + +Occasionally, a table may be too large to store in memory. `download_civis` +can be used in place of `read_civis` to download data straight to disk from Platform. + +Like `read_civis`, `download_civis` can download files from Amazon Redshift by passing +`schema.tablename`, or `sql(...)` as the first argument. Files can be downloaded from +Amazon S3 by passing the file id to `download_civis`. + +```{r eval=FALSE} +query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" +download_civis(sql(query), file = "path/to/my_file.csv") +download_civis("schema.tablename", file = "path/to/my_file.csv") + +id <- write_civis_file(iris) +download_civis(id, file = "path/to/my_iris.rds") +``` + + +## Running Queries on Platform + +Arbitrary queries can be run on Redshift using `query_civis`, which returns the meta-data +of the query. + +```{r eval=FALSE} +q_res <- query_civis("GRANT ALL ON schema.my_table TO GROUP admin") +``` + +Existing queries can be re-run by passing the query id to `query_civis`: + +```{r, eval = FALSE} +id <- q_res$id +query_civis(id) +``` + +## Common Errors + +#### Civis API key not properly set or has expired. +Often an improper API key will return an error like below: +```{r, eval=FALSE} + Error in api_key() : + The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') +``` + +However, there may be cases where the errors are less straightforward. It +is a good idea to test that API credentials are properly set with a simple +call such as `civis::users_list_me()`. See the README to set +up API keys correctly. + +#### Query does not return any results. +This may happen if a table is empty +or when no rows match a `WHERE` statement. To fix, double check that +the query is correct or the table is not empty. + +```{r, eval=FALSE} +read_civis(sql("SELECT * FROM schema.tablename WHERE 1 = 0")) +Error in download_script_results(run$script_id, run$run_id) : + Query produced no output. +``` + +#### Database not set correctly. +For both `read_civis` and `write_civis`, the database must be set to the +correct, case sensitive name (not hostname) of +the database. + +```{r, eval=FALSE} + Error in get_db(database) : + Argument database is NULL and options("civis.default_db") not set. Set this option using options(civis.default_db = "my_database") +``` + +To see a complete list of database names, run: + +```{r, eval=FALSE} +sapply(databases_list(), function(x) x$name) +``` diff --git a/doc/data_import_and_export.html b/doc/data_import_and_export.html new file mode 100644 index 00000000..fdc2c624 --- /dev/null +++ b/doc/data_import_and_export.html @@ -0,0 +1,551 @@ + + + + + + + + + + + + + + + + +Data Import and Export + + + + + + + + + + + + + + + + + + + + + + + + + + +

Data Import and Export

+

Patrick Miller, Keith Ingersoll

+

2017-08-14

+ + + +
+

Passing Data Back and Forth

+

Often the simplest, but most useful operation when working with the +Platform is to move data in and out. From the perspective of the R +client, we call moving data from the Platform to the local machine +reading. Likewise, moving data from the local machine to the +Platform is called writing.

+

The civis client handles data imports and exports in two +basic ways:

+
    +
  1. Moving data directly between the R workspace and the Platform (the +most common use case).
  2. +
  3. Moving data between the Platform and local csv files (this is useful +for large data that doesn’t fit into memory).
  4. +
+

Data can be stored on Platform in two places:

+
    +
  1. Amazon Redshift, a SQL database.
  2. +
  3. Amazon S3, also referred to as the ‘files’ endpoint.
  4. +
+

Tables in Redshift are accessed and modified using SQL queries. +Tables in Redshift can be easily shared and used in multiple workflows +by multiple people. However, importing and exporting even small files on +Redshift can be slow.

+

R objects and arbitrary files can be stored on Amazon S3, and are +accessed using a numeric file id. Data frames are uploaded as CSVs for +portability, and arbitrary R objects are serialized using +saveRDS for speed and efficiency.

+
+
+

Reading Data Into R From Platform

+

The main workhorse for getting data from Platform is +read_civis. This function is designed to work similarly to +the built in function read.csv, returning a dataframe from +a table in Platform. For more flexibility, read_civis can +download files from Redshift using an SQL query, or download a file from +S3 (‘the files endpoint’) using a file id.

+

To read from a table in Platform, simply provide the name of the +schema, table within the schema, and the database:

+
df <- read_civis("schema.tablename", database = "my-database")
+

For convenience, a default database can be set in the package +options, and not specified in further calls to any IO function. If there +is only one database available, this database will automatically be used +as the default. In the examples that follow, we assume that a default +database has been set.

+
options(civis.default_db = "my-database")
+df <- read_civis("schema.tablename")
+

read_civis accepts SQL queries when more flexibility is +needed. This is accomplished by wrapping sql(...) around a +string containing the query. With read_civis, queries are +always read only, and always return a data.frame.

+
query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23"
+df <- read_civis(sql(query))
+

Finally, read_civis accepts a file id as the first +argument to read in files from S3 as data frames. IDs are obtained from +write_civis_file.

+
data(iris)
+id <- write_civis_file(iris)
+df <- read_civis(id)
+

For maximum flexibility, read_civis accepts parameters +from read.csv which can be used to define data types when +the defaults are not appropriate. For instance, when numbers should be +read in as characters or when strings shouldn’t be read in as +factors.

+
query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23"
+df <- read_civis(sql(query), colClasses = "character")
+df2 <- read_civis(sql(query), as.is = TRUE)
+
+
+

Uploading Data to a Database

+

The complement to reading data into the R workspace is writing data +to the Platform. The function write_civis uploads data +frames or csv files to an Amazon Redshift database. The function +write_civis_file uploads R objects and arbitrary files to +Amazon S3 (the files endpoint).

+

When creating a new table, write_civis relies on +Platform to determine data types. Distkeys and sortkeys can optionally +be set to improve query performance. Again, we set a default database in +these examples for convenience.

+
options(civis.default_db = "my_database")
+df <- data.frame(x = rnorm(100), y = rnorm(100), z = rnorm(100))
+write_civis(df, tablename = "schema.tablename",
+            distkey = "id", sortkey1 = "date", sortkey2 = "type")
+

By default, write_civis will fail if the table passed in +tablename already exists. Optionally, +write_civis can append to an existing table. It may also +delete all rows and then append (truncate). If specific datatypes are +required, a table may first be created with a SQL +CREATE TABLE command and then data can be inserted with +write_civis.

+
write_civis(df, tablename = "schema.tablename", if_exists = "append")
+write_civis(df, tablename = "schema.tablename", if_exists = "truncate")
+
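When specific column types are required, that two-step pattern might look like the sketch below; the column definitions are purely illustrative, and `df` and the default database are assumed to be set as in the previous example.

```{r, eval=FALSE}
# Create the table with explicit types, then append the data frame to it.
query_civis("
  CREATE TABLE schema.tablename (
    x DOUBLE PRECISION,
    y DOUBLE PRECISION,
    z DOUBLE PRECISION
  )")
write_civis(df, tablename = "schema.tablename", if_exists = "append")
```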

If a csv file is saved to disk but not loaded in the R workspace, write_civis will upload the csv to Platform without needing to first load the csv into RAM. This can save time when a file is large. Uploading a csv directly to Platform is done by simply passing the file name and path to write_civis as the first argument:

+
write_civis("~/path/to/my_data.csv", tablename="schema.tablename")
+
+
+

Uploading Data to S3

+

Finally, write_civis_file uploads data frames, R objects and files to Amazon S3, which is also referred to as the ‘files endpoint.’ Data frames are uploaded as CSVs, and R objects saved to the files endpoint are serialized using saveRDS.

+

Data frames and R objects can be loaded back into memory by passing +the file id to read_civis, and an appropriate +using argument.

+
# Upload a data frame
+data(iris)
+id <- write_civis_file(iris)
+iris2 <- read_civis(id)
+
+# Upload an arbitrary R object
+farm <- list(chickens = 1, ducks = 4, pigs = 2, cows = 1)
+id <- write_civis_file(farm)
+farm2 <- read_civis(id, using = readRDS)
+

When passed a file name and path, write_civis_file will +upload the file to S3 as-is. To read the file back into memory, an +appropriate function to convert the file to a data frame must be +provided to the using argument of read_civis. +For example, a JSON file can be read back into R using +jsonlite::fromJSON.

+
id <- write_civis_file("path/to/my_data.json")
+read_civis(id, using = jsonlite::fromJSON)
+
+
+

Downloading Large Data Sets from Platform.

+

Occasionally, a table may be too large to store in memory. +download_civis can be used in place of +read_civis to download data straight to disk from +Platform.

+

Like read_civis, download_civis can +download files from Amazon Redshift by passing +schema.tablename, or sql(...) as the first +argument. Files can be downloaded from Amazon S3 by passing the file id +to download_civis.

+
query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23"
+download_civis(sql(query), file = "path/to/my_file.csv")
+download_civis("schema.tablename", file = "path/to/my_file.csv")
+
+id <- write_civis_file(iris)
+download_civis(id, file = "path/to/my_iris.rds")
+
+
+

Running Queries on Platform

+

Arbitrary queries can be run on Redshift using +query_civis, which returns the meta-data of the query.

+
q_res <- query_civis("GRANT ALL ON schema.my_table TO GROUP admin")
+

Existing queries can be re-run by passing the query id to +query_civis:

+
id <- q_res$id
+query_civis(id)
+
+
+

Common Errors

+
+

Civis API key not properly set or has expired.

+

Often an improper API key will return an error like below:

+
 Error in api_key() : 
+  The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '<api_key>') 
+

However, there may be cases where the errors are less +straightforward. It is a good idea to test that API credentials are +properly set with a simple call such as +civis::users_list_me(). See the README to set up API keys +correctly.

+
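A quick check might look like the sketch below; if the call errors, the key is likely missing, expired, or mistyped.

```{r, eval=FALSE}
library(civis)
me <- users_list_me()
me$name
```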
+
+

Query does not return any results.

+

This may happen if a table is empty or when no rows match a +WHERE statement. To fix, double check that the query is +correct or the table is not empty.

+
read_civis(sql("SELECT * FROM schema.tablename WHERE 1 = 0"))
+Error in download_script_results(run$script_id, run$run_id) : 
+  Query produced no output. 
+
+
+

Database not set correctly.

+

For both read_civis and write_civis, the +database must be set to the correct, case sensitive name (not hostname) +of the database.

+
 Error in get_db(database) : 
+  Argument database is NULL and options("civis.default_db") not set. Set this option using options(civis.default_db = "my_database") 
+

To see a complete list of database names, run:

+
sapply(databases_list(), function(x) x$name)
+
+
+ + + + + + + + + + + diff --git a/doc/quick_start.R b/doc/quick_start.R new file mode 100644 index 00000000..0a3ae8bc --- /dev/null +++ b/doc/quick_start.R @@ -0,0 +1,57 @@ +## ---- eval=FALSE-------------------------------------------------------------- +# name <- civis::users_list_me()$name +# paste(name, "is really awesome!") + +## ---- eval=FALSE-------------------------------------------------------------- +# library(civis) +# +# # First we'll load a dataframe of the famous iris dataset +# data(iris) +# +# # We'll set a default database and define the table where want to +# # store our data +# options(civis.default_db = "my_database") +# iris_tablename <- "my_schema.my_table" +# +# # Next we'll push the data to the database table +# write_civis(iris, iris_tablename) +# +# # Great, now let's read it back +# df <- read_civis(iris_tablename) +# +# # Hmmm, I'm more partial to setosa myself. Let's write a custom sql query. +# # We'll need to wrap our query string in `sql` to let read_civis know we +# # are passing in a sql command rather than a tablename. +# query_str <- paste("SELECT * FROM", iris_tablename, "WHERE Species = 'setosa'") +# iris_setosa <- read_civis(sql(query_str)) +# +# # Now let's store this data along with a note as a serialized R object +# # on a remote file system. We could store any object remotely this way. +# data <- list(data = iris_setosa, special_note = "The best iris species") +# file_id <- write_civis_file(data) +# +# # Finally, let's read back our data from the remote file system. +# data2 <- read_civis(file_id) +# data2[["special_note"]] +# +# ## [1] "The best iris species" + +## ---- eval=FALSE-------------------------------------------------------------- +# library(civis) +# +# # It really is a great dataset +# data(iris) +# +# # Gradient boosting or random forest, who will win? +# gb_model <- civis_ml_gradient_boosting_classifier(iris, "Species") +# rf_model <- civis_ml_random_forest_classifier(iris, "Species") +# macroavgs <- list(gb_model = gb_model$metrics$metrics$roc_auc_macroavg, +# rf_model = rf_model$metrics$metrics$roc_auc_macroavg) +# macroavgs +# +# ## $gb_model +# ## [1] 0.9945333 +# ## +# ## $rf_model +# ## [1] 0.9954667 + diff --git a/doc/quick_start.Rmd b/doc/quick_start.Rmd new file mode 100644 index 00000000..8d4841b0 --- /dev/null +++ b/doc/quick_start.Rmd @@ -0,0 +1,103 @@ +--- +title: "Getting Started" +date: "2017-08-21" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Getting Started} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +## A (Very) Quick Start + +To begin, make sure `civis` is installed and your +[API key is in your R environment](https://civisanalytics.github.io/civis-r/#installation). +You can quickly test that `civis` is working by invoking + +```{r, eval=FALSE} +name <- civis::users_list_me()$name +paste(name, "is really awesome!") +``` + +If `civis` is working, you'll see a friendly message. Otherwise, you might see an +error like this when `civis` wasn't installed properly: + +``` +Error in loadNamespace(name) : there is no package called 'civis' +``` + +or like this if you haven't set your API key correctly: + +``` +Error in api_key() : The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') +``` + +With `civis`, moving data to and from the cloud takes only a few lines of code. +Your data can be stored as rows in a table, CSVs on remote file system or even +as serialized R objects like nested lists. 
For example, + +```{r, eval=FALSE} +library(civis) + +# First we'll load a dataframe of the famous iris dataset +data(iris) + +# We'll set a default database and define the table where want to +# store our data +options(civis.default_db = "my_database") +iris_tablename <- "my_schema.my_table" + +# Next we'll push the data to the database table +write_civis(iris, iris_tablename) + +# Great, now let's read it back +df <- read_civis(iris_tablename) + +# Hmmm, I'm more partial to setosa myself. Let's write a custom sql query. +# We'll need to wrap our query string in `sql` to let read_civis know we +# are passing in a sql command rather than a tablename. +query_str <- paste("SELECT * FROM", iris_tablename, "WHERE Species = 'setosa'") +iris_setosa <- read_civis(sql(query_str)) + +# Now let's store this data along with a note as a serialized R object +# on a remote file system. We could store any object remotely this way. +data <- list(data = iris_setosa, special_note = "The best iris species") +file_id <- write_civis_file(data) + +# Finally, let's read back our data from the remote file system. +data2 <- read_civis(file_id) +data2[["special_note"]] + +## [1] "The best iris species" +``` + +`civis` also includes functionality for working with CivisML, Civis' machine +learning ecosystem. With the combined power of CivisML and `civis`, you can build +models in the cloud where the models can use as much memory as they need and +there’s no chance of your laptop crashing. + +```{r, eval=FALSE} +library(civis) + +# It really is a great dataset +data(iris) + +# Gradient boosting or random forest, who will win? +gb_model <- civis_ml_gradient_boosting_classifier(iris, "Species") +rf_model <- civis_ml_random_forest_classifier(iris, "Species") +macroavgs <- list(gb_model = gb_model$metrics$metrics$roc_auc_macroavg, + rf_model = rf_model$metrics$metrics$roc_auc_macroavg) +macroavgs + +## $gb_model +## [1] 0.9945333 +## +## $rf_model +## [1] 0.9954667 +``` + +For a comprehensive list of functions in `civis`, see +[Reference](https://civisanalytics.github.io/civis-r/reference/index.html) in the [full +documentation](https://civisanalytics.github.io/civis-r). The full documentation also +includes a set of `Articles` for detailed documentation on common workflows, including +[data manipulation](https://civisanalytics.github.io/civis-r/articles/data_import_and_export.html) and [building models in parallel](https://civisanalytics.github.io/civis-r/articles/concurrency.html). \ No newline at end of file diff --git a/doc/quick_start.html b/doc/quick_start.html new file mode 100644 index 00000000..e68d280c --- /dev/null +++ b/doc/quick_start.html @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + +Getting Started + + + + + + + + + + + + + + + + + + + + + + + + + + +

Getting Started

+

2017-08-21

+ + + +
+

A (Very) Quick Start

+

To begin, make sure civis is installed and your API key is +in your R environment. You can quickly test that civis +is working by invoking

+
name <- civis::users_list_me()$name
+paste(name, "is really awesome!")
+

If civis is working, you’ll see a friendly message. +Otherwise, you might see an error like this when civis +wasn’t installed properly:

+
Error in loadNamespace(name) : there is no package called 'civis'
+

or like this if you haven’t set your API key correctly:

+
Error in api_key() : The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '<api_key>')
+
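One way to set the key for the current session only is sketched below; the key value is a placeholder, and for a persistent setup the variable should go in your `.Renviron` as described in the README.

```{r, eval=FALSE}
Sys.setenv(CIVIS_API_KEY = "your-api-key-here")  # placeholder, not a real key
civis::users_list_me()$name
```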

With civis, moving data to and from the cloud takes only a few lines of code. Your data can be stored as rows in a table, as CSVs on a remote file system, or even as serialized R objects like nested lists. For example,

+
library(civis)
+
+# First we'll load a dataframe of the famous iris dataset
+data(iris)
+
+# We'll set a default database and define the table where we want to
+# store our data
+options(civis.default_db = "my_database")
+iris_tablename <- "my_schema.my_table"
+
+# Next we'll push the data to the database table
+write_civis(iris, iris_tablename)
+
+# Great, now let's read it back
+df <- read_civis(iris_tablename)
+
+# Hmmm, I'm more partial to setosa myself. Let's write a custom sql query.
+# We'll need to wrap our query string in `sql` to let read_civis know we
+# are passing in a sql command rather than a tablename.
+query_str <- paste("SELECT * FROM", iris_tablename, "WHERE Species = 'setosa'")
+iris_setosa <- read_civis(sql(query_str))
+
+# Now let's store this data along with a note as a serialized R object
+# on a remote file system. We could store any object remotely this way.
+data <- list(data = iris_setosa, special_note = "The best iris species")
+file_id <- write_civis_file(data)
+
+# Finally, let's read back our data from the remote file system.
+data2 <- read_civis(file_id)
+data2[["special_note"]]
+
+## [1] "The best iris species"
+

civis also includes functionality for working with +CivisML, Civis’ machine learning ecosystem. With the combined power of +CivisML and civis, you can build models in the cloud where +the models can use as much memory as they need and there’s no chance of +your laptop crashing.

+
library(civis)
+
+# It really is a great dataset
+data(iris)
+
+# Gradient boosting or random forest, who will win?
+gb_model <- civis_ml_gradient_boosting_classifier(iris, "Species")
+rf_model <- civis_ml_random_forest_classifier(iris, "Species")
+macroavgs <- list(gb_model = gb_model$metrics$metrics$roc_auc_macroavg,
+                  rf_model = rf_model$metrics$metrics$roc_auc_macroavg)
+macroavgs
+
+## $gb_model
+## [1] 0.9945333
+## 
+## $rf_model
+## [1] 0.9954667
+

For a comprehensive list of functions in civis, see Reference +in the full +documentation. The full documentation also includes a set of +Articles for detailed documentation on common workflows, +including data +manipulation and building +models in parallel.

+
+ + + + + + + + + + + From 8132f37ca922707928de6b7ef5e9511f87958530 Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 13 Feb 2023 14:59:30 -0600 Subject: [PATCH 04/22] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 89ea546b..1f3d90ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ### Changed - Deprecated the `local` argument to the `CivisFuture()` function. The `local` argument is defunct for `future` versions > 1.31.0. +- Regenerated the default Client (`R/generated_client.R`) ## [3.0.0] - 2020-06-22 From 4bd82f37c231846abf2bb27326fb50d58e90c267 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 10:21:59 -0600 Subject: [PATCH 05/22] upper bound future to v1.31.0 --- DESCRIPTION | 1 + 1 file changed, 1 insertion(+) diff --git a/DESCRIPTION b/DESCRIPTION index 6a207e40..a6dc84bf 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -26,6 +26,7 @@ URL: https://github.com/civisanalytics/civis-r BugReports: https://github.com/civisanalytics/civis-r/issues Imports: future (>= 1.8.0), + future (<= 1.31.0), httr, jsonlite, methods, From 78905095fe8597e534ae8c9cf65ee792fc5945d7 Mon Sep 17 00:00:00 2001 From: Bryan Baird Date: Tue, 14 Feb 2023 13:21:12 -0500 Subject: [PATCH 06/22] Patch fetch_all pagination helper (#243) * Patch fetch_all pagination helper The `fetch_all` function is intended to call `fetch_until` until a result of `FALSE` is returned. However, when the API returns no result, that output manifests as a NULL, not a boolean `FALSE` value. This change prevents a coercion error of trying to boolean compare a NULL value in R. * ADD Bryan Baird as a contributor * Update DESCRIPTION --------- Co-authored-by: pcooman --- DESCRIPTION | 3 ++- R/pagination_helpers.R | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index a6dc84bf..bed4bd00 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -14,7 +14,8 @@ Authors@R: c( person("Elizabeth", "Sander", email = "lsander@civisanalytics.com", role = "ctb"), person("Madison", "Hobbs", email = "mhobbs@g.hmc.edu", role = "ctb"), person("Anna", "Bladey", email = "abladey@civisanalytics.com", role = "ctb"), - person("Sahil", "Shah", email = "sshah2@civisanalytics.com", role = "ctb")) + person("Sahil", "Shah", email = "sshah2@civisanalytics.com", role = "ctb"), + person("Bryan", "Baird", email = "bbaird@civisanalytics.com", role = "ctb")) Description: A convenient interface for making requests directly to the 'Civis Platform API' . Full documentation available 'here' . diff --git a/R/pagination_helpers.R b/R/pagination_helpers.R index 6ea78566..f4b3cb25 100644 --- a/R/pagination_helpers.R +++ b/R/pagination_helpers.R @@ -41,7 +41,7 @@ next_page <- function(response) { #' column_names <- columns %>% purrr::map_chr("name") #' } fetch_all <- function(fn, ...) { - fetch_until(fn, function(x) x == FALSE, ...) + fetch_until(fn, function(x) is.null(x), ...) } #' Retrieve some results from a paginated endpoint From d441211d55d2c41af31576547b2d90a9662a02d9 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 12:29:36 -0600 Subject: [PATCH 07/22] Update CHANGELOG.md --- CHANGELOG.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f3d90ed..03290267 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,12 +2,18 @@ ## Unreleased -## [3.0.1] +## [3.1.0] ### Changed -- Deprecated the `local` argument to the `CivisFuture()` function. 
The `local` argument is defunct for `future` versions > 1.31.0. +- Deprecated the `local` argument to the `CivisFuture()` function. The `local` argument +is defunct for `future` versions > 1.31.0. - Regenerated the default Client (`R/generated_client.R`) +### Fixed +- `fetch_until` will now fetch results from the API until the result is empty (i.e. returns +NULL) instead of FALSE. This change prevents a coercion error of trying to boolean compare +a NULL value in R. (#243) + ## [3.0.0] - 2020-06-22 ### Changed From 2a304fbf63ddd73a3a3d6e7cc26dff5886a8b7b3 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 13:37:32 -0600 Subject: [PATCH 08/22] fix typos --- R/civis_future.R | 2 +- R/io.R | 2 +- man/CivisFuture.Rd | 2 +- man/multipart_upload.Rd | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/R/civis_future.R b/R/civis_future.R index 6a86f8ba..06aab541 100644 --- a/R/civis_future.R +++ b/R/civis_future.R @@ -168,7 +168,7 @@ cancel.CivisFuture <- function(future, ...) { } #' @export -#' @describeIn CivisFuture Check if a CivisFutre has resolved +#' @describeIn CivisFuture Check if a CivisFuture has resolved resolved.CivisFuture <- function(future, ...){ if (!is.null(future$job$containerId)) { future$state <- scripts_get_containers_runs(id = future$job$containerId, diff --git a/R/io.R b/R/io.R index 7a27b951..3318fb16 100644 --- a/R/io.R +++ b/R/io.R @@ -806,7 +806,7 @@ start_scripted_sql_job <- function(database, sql, job_name, hidden = TRUE, #' #' If a future::plan has been set, will be carried out in parallel. #' @param file the file -#' @param name name of the upload, defualts to +#' @param name name of the upload, defaults to #' @param chunk_size size of the chunks in bytes #' @param expires_at when the file expires (default never). 
#' diff --git a/man/CivisFuture.Rd b/man/CivisFuture.Rd index ce5c428a..fb5ef663 100644 --- a/man/CivisFuture.Rd +++ b/man/CivisFuture.Rd @@ -96,7 +96,7 @@ Evaluate an expression in Civis Platform \item \code{cancel(CivisFuture)}: Cancel a CivisFuture -\item \code{resolved(CivisFuture)}: Check if a CivisFutre has resolved +\item \code{resolved(CivisFuture)}: Check if a CivisFuture has resolved \item \code{fetch_logs(CivisFuture)}: Fetch logs from a CivisFuture diff --git a/man/multipart_upload.Rd b/man/multipart_upload.Rd index 7fe4c6d6..b7b7c02e 100644 --- a/man/multipart_upload.Rd +++ b/man/multipart_upload.Rd @@ -9,7 +9,7 @@ multipart_upload(file, name = "", chunk_size = 32 * 1024, expires_at = NULL) \arguments{ \item{file}{the file} -\item{name}{name of the upload, defualts to} +\item{name}{name of the upload, defaults to} \item{chunk_size}{size of the chunks in bytes} From 4fd8808115db11f99b179120dcbe3c0cc31ffb59 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 13:52:07 -0600 Subject: [PATCH 09/22] DEP LazyData (no data directory) --- DESCRIPTION | 1 - 1 file changed, 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index bed4bd00..14f5fde8 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -22,7 +22,6 @@ Description: A convenient interface for making Depends: R (>= 3.2.0) License: BSD_3_clause + file LICENSE -LazyData: true URL: https://github.com/civisanalytics/civis-r BugReports: https://github.com/civisanalytics/civis-r/issues Imports: From 568eb4b526d31109225d572790e672278f751d6a Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 14:09:06 -0600 Subject: [PATCH 10/22] rename CHANGELOG.md to NEWS.md --- CHANGELOG.md => NEWS.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename CHANGELOG.md => NEWS.md (100%) diff --git a/CHANGELOG.md b/NEWS.md similarity index 100% rename from CHANGELOG.md rename to NEWS.md From 1d0a43f820781534d7fd63a56761b4cec4c84200 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 14:52:34 -0600 Subject: [PATCH 11/22] add cran-comments --- .Rbuildignore | 2 +- cran-comments.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 cran-comments.md diff --git a/.Rbuildignore b/.Rbuildignore index 1faae671..214dca9d 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -8,7 +8,7 @@ ^docs$ ^CODE_OF_CONDUCT\.md$ ^_pkgdown\.yaml$ -^CHANGELOG\.md$ +^NEWS\.md$ ^cran-comments\.md$ ^doc$ ^Meta$ diff --git a/cran-comments.md b/cran-comments.md new file mode 100644 index 00000000..858617db --- /dev/null +++ b/cran-comments.md @@ -0,0 +1,5 @@ +## R CMD check results + +0 errors | 0 warnings | 1 note + +* This is a new release. From b389b98a9c1914f99f9d68509f62a43fd907b5d5 Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 15:24:47 -0600 Subject: [PATCH 12/22] Update cran-comments.md --- cran-comments.md | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cran-comments.md b/cran-comments.md index 858617db..4f19bf44 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,5 +1,33 @@ ## R CMD check results -0 errors | 0 warnings | 1 note +0 errors | 2 warnings | 2 notes * This is a new release. + +* checking CRAN incoming feasibility ... NOTE + * New maintainer: Peter Cooman + * Old maintainer: Patrick Miller + +* checking whether package ‘civis’ can be installed ... WARNING + * `default_credential` masks `civis::default_credential()`. + * `get_database_id` masks `civis::get_database_id()`. + * `sql` masks `civis::sql()`. 
+ + --> the civis:: functions overwrite themselves. There are no actual conflicts. + +* checking DESCRIPTION meta-information ... NOTE + * Package listed in more than one of Depends, Imports, Suggests, Enhances: + ‘future’ + + --> 'future' is listed twice in the Imports section: once with a lower version bound and a second time with an upper version boud: + ``` + Imports: + future (>= 1.8.0), + future (<= 1.31.0), + ``` + The 'future' dependency does not appear in the other sections (Depends, Suggests or Enhances), so I'm not sure why this Note appeared. Please let me know if there is a better way to specify a valid version range for a dependency. + +* checking top-level files ... WARNING + * A complete check needs the 'checkbashisms' script. + + --> I believe this Warning only appears because I am running the checks on my local system. I did not see this Warning when I ran `devtools::check_rhub()` or `devtools::check_win_release()` \ No newline at end of file From 9644e1bd158dbb77e9b76c556d1dec610b62a4bc Mon Sep 17 00:00:00 2001 From: pcooman Date: Tue, 14 Feb 2023 15:28:44 -0600 Subject: [PATCH 13/22] add CRAN submission info --- .Rbuildignore | 1 + CRAN-SUBMISSION | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 CRAN-SUBMISSION diff --git a/.Rbuildignore b/.Rbuildignore index 214dca9d..02247199 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -12,3 +12,4 @@ ^cran-comments\.md$ ^doc$ ^Meta$ +^CRAN-SUBMISSION$ diff --git a/CRAN-SUBMISSION b/CRAN-SUBMISSION new file mode 100644 index 00000000..13d80d3b --- /dev/null +++ b/CRAN-SUBMISSION @@ -0,0 +1,3 @@ +Version: 3.1.0 +Date: 2023-02-14 21:28:04 UTC +SHA: b389b98a9c1914f99f9d68509f62a43fd907b5d5 From 21ee7d8cbcc3a394814a2e6a217432d07608809a Mon Sep 17 00:00:00 2001 From: pcooman Date: Thu, 16 Feb 2023 09:30:44 -0600 Subject: [PATCH 14/22] only upper bound `future` package --- DESCRIPTION | 1 - 1 file changed, 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index 14f5fde8..20eef99c 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -25,7 +25,6 @@ License: BSD_3_clause + file LICENSE URL: https://github.com/civisanalytics/civis-r BugReports: https://github.com/civisanalytics/civis-r/issues Imports: - future (>= 1.8.0), future (<= 1.31.0), httr, jsonlite, From 543a1dd6e384f0c4dfc16aaec5a4c4d3225f76ee Mon Sep 17 00:00:00 2001 From: pcooman Date: Fri, 17 Feb 2023 09:14:09 -0600 Subject: [PATCH 15/22] update links in vignettes --- README.md | 4 ++-- doc/civis_ml.Rmd | 22 +++++++++++----------- doc/civis_ml.html | 22 +++++++++++----------- doc/civis_scripts.html | 4 ++-- doc/quick_start.Rmd | 2 +- doc/quick_start.html | 2 +- vignettes/civis_ml.Rmd | 22 +++++++++++----------- vignettes/quick_start.Rmd | 2 +- 8 files changed, 40 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 49a4c751..0f11b44f 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ the Civis Platform API. ## Documentation -The full documentation is hosted [here](https://civisanalytics.github.io/civis-r). The fastest way to get started is with the [getting started guide](https://civisanalytics.github.io/civis-r/articles/quick_start.html). +The full documentation is hosted [here](https://civisanalytics.github.io/civis-r/). The fastest way to get started is with the [getting started guide](https://civisanalytics.github.io/civis-r/articles/quick_start.html). 
## API Keys @@ -172,4 +172,4 @@ person("FirstName", "LastName", email = "email@email.com", role = "ctb") ``` This project is intended to be a safe, welcoming space for collaboration, and -contributors are expected to adhere to the [Contributor Covenant](http://www.contributor-covenant.org) code of conduct. +contributors are expected to adhere to the [Contributor Covenant](http://www.contributor-covenant.org/) code of conduct. diff --git a/doc/civis_ml.Rmd b/doc/civis_ml.Rmd index 0887554e..70770b26 100644 --- a/doc/civis_ml.Rmd +++ b/doc/civis_ml.Rmd @@ -13,7 +13,7 @@ There are so many models to build! When this becomes challenging on a local mach [CivisML](https://medium.com/civis-analytics/civisml-scikit-learn-at-scale-b01b496916ea) is a machine learning service on Civis Platform that makes this as painless as possible. You can fit many different models, do extensive hyperparameter tuning, and score data sets with millions of observations stored in remote databases. Once these models are built, they live in Civis Platform permanently and can be included into production pipelines. Results can be easily incorporated into reports and dashboards. -CivisML is built in Python using [scikit-learn](http://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. +CivisML is built in Python using [scikit-learn](https://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. While `civis_ml` is a complex function with many arguments, basic machine learning modeling and scoring can be easily carried out. We illustrate several features of `civis_ml` with data from a fictitious company called Brandable, who is looking to predict which customers are likely to upgrade from the free to the premium service. @@ -46,17 +46,17 @@ After the data source is specified, we next choose the model type. 
There are 13 | Name | R Workflow | Model Type | scikit-learn Documentation | |------|:-----------|------------|-----------|------------------| - `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | - `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | - `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | - `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| + `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | + `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | + `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | + `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| `multilayer_perceptron_classifier` | | classification | [muffnn.MLPClassifier](https://github.com/civisanalytics/muffnn) | `stacking_classifier` | | classification | [StackedClassifier](https://github.com/civisanalytics/civisml-extensions) - `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | - `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | - `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | [GradientBoostingRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | - `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| - `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| + `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | + `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | + `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | 
[GradientBoostingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | + `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| + `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| `multilayer_perceptron_regressor` | | regression | [muffnn.MLPRegressor](https://github.com/civisanalytics/muffnn) | `stacking_regressor` | | regression | [StackedRegressor](https://github.com/civisanalytics/civisml-extensions) @@ -132,7 +132,7 @@ between .01 and 1, and with a mean close to .1. The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a `glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, -scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, +scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). ```{r, eval = FALSE} diff --git a/doc/civis_ml.html b/doc/civis_ml.html index 82ac646b..7b18940f 100644 --- a/doc/civis_ml.html +++ b/doc/civis_ml.html @@ -354,7 +354,7 @@

2018-1-18

Civis Platform permanently and can be included into production pipelines. Results can be easily incorporated into reports and dashboards.

-

CivisML is built in Python using scikit-learn, and leverages +

CivisML is built in Python using scikit-learn, and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the civis_ml function in civis.

@@ -418,7 +418,7 @@

Modeling

sparse_logistic civis_ml_sparse_logistic classification -Logistic +Logistic Regression @@ -426,21 +426,21 @@

Modeling

gradient_boosting_classifier civis_ml_gradient_boosting_classifier classification -GradientBoostingClassifier +GradientBoostingClassifier random_forest_classifier civis_ml_random_forest_classifier classification -RandomForestClassifier +RandomForestClassifier extra_trees_classifier civis_ml_extra_trees_classifier classification -ExtraTreesClassifier +ExtraTreesClassifier @@ -461,35 +461,35 @@

Modeling

sparse_linear_regressor civis_ml_sparse_linear_regressor regression -LinearRegression +LinearRegression sparse_ridge_regressor civis_ml_sparse_ridge_regressor regression -Ridge +Ridge gradient_boosting_regressor civis_ml_gradient_boosting_regressor regression -GradientBoostingRegressor +GradientBoostingRegressor random_forest_regressor civis_ml_random_forest_regressor regression -RandomForestRegressor +RandomForestRegressor extra_trees_regressor civis_ml_extra_trees_regressor regression -ExtraTreesRegressor +ExtraTreesRegressor @@ -645,7 +645,7 @@

Stacking

a glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss'). Defaults for the predefined models are documented in -?civis_ml. Each column is first standardized, +?civis_ml. Each column is first standardized, and then the model predictions are combined using LogisticRegressionCV with penalty='l2' and tol=1e-08. The "stacking_regressor" works similarly, stacking together the diff --git a/doc/civis_scripts.html b/doc/civis_scripts.html index f07e76a6..5eaf1497 100644 --- a/doc/civis_scripts.html +++ b/doc/civis_scripts.html @@ -12,7 +12,7 @@ - + Productionizing with Civis Scripts @@ -340,7 +340,7 @@

Productionizing with Civis Scripts

Patrick Miller

-

2023-02-13

+

2023-02-17

diff --git a/doc/quick_start.Rmd b/doc/quick_start.Rmd index 8d4841b0..00ec7950 100644 --- a/doc/quick_start.Rmd +++ b/doc/quick_start.Rmd @@ -98,6 +98,6 @@ macroavgs For a comprehensive list of functions in `civis`, see [Reference](https://civisanalytics.github.io/civis-r/reference/index.html) in the [full -documentation](https://civisanalytics.github.io/civis-r). The full documentation also +documentation](https://civisanalytics.github.io/civis-r/). The full documentation also includes a set of `Articles` for detailed documentation on common workflows, including [data manipulation](https://civisanalytics.github.io/civis-r/articles/data_import_and_export.html) and [building models in parallel](https://civisanalytics.github.io/civis-r/articles/concurrency.html). \ No newline at end of file diff --git a/doc/quick_start.html b/doc/quick_start.html index e68d280c..d21c4c2d 100644 --- a/doc/quick_start.html +++ b/doc/quick_start.html @@ -414,7 +414,7 @@

A (Very) Quick Start

## $rf_model ## [1] 0.9954667

For a comprehensive list of functions in civis, see Reference -in the full +in the full documentation. The full documentation also includes a set of Articles for detailed documentation on common workflows, including data diff --git a/vignettes/civis_ml.Rmd b/vignettes/civis_ml.Rmd index 0887554e..70770b26 100644 --- a/vignettes/civis_ml.Rmd +++ b/vignettes/civis_ml.Rmd @@ -13,7 +13,7 @@ There are so many models to build! When this becomes challenging on a local mach [CivisML](https://medium.com/civis-analytics/civisml-scikit-learn-at-scale-b01b496916ea) is a machine learning service on Civis Platform that makes this as painless as possible. You can fit many different models, do extensive hyperparameter tuning, and score data sets with millions of observations stored in remote databases. Once these models are built, they live in Civis Platform permanently and can be included into production pipelines. Results can be easily incorporated into reports and dashboards. -CivisML is built in Python using [scikit-learn](http://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. +CivisML is built in Python using [scikit-learn](https://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. While `civis_ml` is a complex function with many arguments, basic machine learning modeling and scoring can be easily carried out. We illustrate several features of `civis_ml` with data from a fictitious company called Brandable, who is looking to predict which customers are likely to upgrade from the free to the premium service. @@ -46,17 +46,17 @@ After the data source is specified, we next choose the model type. 
There are 13 | Name | R Workflow | Model Type | scikit-learn Documentation | |------|:-----------|------------|-----------|------------------| - `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | - `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | - `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | - `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| + `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | + `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | + `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | + `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| `multilayer_perceptron_classifier` | | classification | [muffnn.MLPClassifier](https://github.com/civisanalytics/muffnn) | `stacking_classifier` | | classification | [StackedClassifier](https://github.com/civisanalytics/civisml-extensions) - `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | - `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | - `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | [GradientBoostingRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | - `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| - `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| + `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | + `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | + `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | 
[GradientBoostingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | + `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| + `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| `multilayer_perceptron_regressor` | | regression | [muffnn.MLPRegressor](https://github.com/civisanalytics/muffnn) | `stacking_regressor` | | regression | [StackedRegressor](https://github.com/civisanalytics/civisml-extensions) @@ -132,7 +132,7 @@ between .01 and 1, and with a mean close to .1. The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a `glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, -scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, +scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). ```{r, eval = FALSE} diff --git a/vignettes/quick_start.Rmd b/vignettes/quick_start.Rmd index 8d4841b0..00ec7950 100644 --- a/vignettes/quick_start.Rmd +++ b/vignettes/quick_start.Rmd @@ -98,6 +98,6 @@ macroavgs For a comprehensive list of functions in `civis`, see [Reference](https://civisanalytics.github.io/civis-r/reference/index.html) in the [full -documentation](https://civisanalytics.github.io/civis-r). The full documentation also +documentation](https://civisanalytics.github.io/civis-r/). The full documentation also includes a set of `Articles` for detailed documentation on common workflows, including [data manipulation](https://civisanalytics.github.io/civis-r/articles/data_import_and_export.html) and [building models in parallel](https://civisanalytics.github.io/civis-r/articles/concurrency.html). 
\ No newline at end of file From 80201735c8be5a8ee9183827fd2b21612cdd76f3 Mon Sep 17 00:00:00 2001 From: pcooman Date: Fri, 17 Feb 2023 09:34:42 -0600 Subject: [PATCH 16/22] update last remaining links --- README.md | 2 +- doc/civis_ml.Rmd | 2 +- doc/civis_ml.html | 2 +- vignettes/civis_ml.Rmd | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0f11b44f..ba78fb59 100644 --- a/README.md +++ b/README.md @@ -172,4 +172,4 @@ person("FirstName", "LastName", email = "email@email.com", role = "ctb") ``` This project is intended to be a safe, welcoming space for collaboration, and -contributors are expected to adhere to the [Contributor Covenant](http://www.contributor-covenant.org/) code of conduct. +contributors are expected to adhere to the [Contributor Covenant](https://www.contributor-covenant.org/) code of conduct. diff --git a/doc/civis_ml.Rmd b/doc/civis_ml.Rmd index 70770b26..7f85b511 100644 --- a/doc/civis_ml.Rmd +++ b/doc/civis_ml.Rmd @@ -132,7 +132,7 @@ between .01 and 1, and with a mean close to .1. The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a `glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, -scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, +scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). ```{r, eval = FALSE} diff --git a/doc/civis_ml.html b/doc/civis_ml.html index 7b18940f..83faf6b3 100644 --- a/doc/civis_ml.html +++ b/doc/civis_ml.html @@ -646,7 +646,7 @@

Stacking

glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss'). Defaults for the predefined models are documented in ?civis_ml. Each column is first standardized, -and then the model predictions are combined using LogisticRegressionCV +and then the model predictions are combined using LogisticRegressionCV with penalty='l2' and tol=1e-08. The "stacking_regressor" works similarly, stacking together the "gradient_boosting_regressor" and diff --git a/vignettes/civis_ml.Rmd b/vignettes/civis_ml.Rmd index 70770b26..7f85b511 100644 --- a/vignettes/civis_ml.Rmd +++ b/vignettes/civis_ml.Rmd @@ -132,7 +132,7 @@ between .01 and 1, and with a mean close to .1. The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a `glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, -scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, +scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). 
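As a minimal sketch of the stacking workflow described above (assuming the Brandable `tab` object and column names used elsewhere in this vignette), only `model_type` needs to change:

```r
# Sketch: fit the stacked classifier described above; `tab`, "upgrade",
# "brandable_user_id", and "residential_zip" are the Brandable examples
# used elsewhere in this vignette
m_stack <- civis_ml(tab, dependent_variable = "upgrade",
                    model_type = "stacking_classifier",
                    primary_key = "brandable_user_id",
                    excluded_columns = "residential_zip")
```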
```{r, eval = FALSE} From 0786c7399b41a5b5dad3d2c81ef04dc0a64d2a1e Mon Sep 17 00:00:00 2001 From: pcooman Date: Wed, 22 Feb 2023 15:55:48 -0600 Subject: [PATCH 17/22] Set future$state to "finished" if API call "succeeded" (#251) * DEP set future$state to "finished" if API job "succeeded" * increment version to v3.1.1 --- DESCRIPTION | 4 ++-- NEWS.md | 7 +++++++ R/civis_future.R | 25 +++++++++++++++++++++---- tests/testthat/test_civis_future.R | 2 +- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 20eef99c..171ff3ce 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: civis Title: R Client for the 'Civis Platform API' -Version: 3.1.0 +Version: 3.1.1 Authors@R: c( person("Peter", "Cooman", email = "pcooman@civisanalytics.com", role = c("cre", "ctb")), person("Patrick", "Miller", email = "pmiller@civisanalytics.com", role = "aut"), @@ -25,7 +25,7 @@ License: BSD_3_clause + file LICENSE URL: https://github.com/civisanalytics/civis-r BugReports: https://github.com/civisanalytics/civis-r/issues Imports: - future (<= 1.31.0), + future (>= 1.8.0), httr, jsonlite, methods, diff --git a/NEWS.md b/NEWS.md index 03290267..9a2188ff 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,6 +2,13 @@ ## Unreleased +## [3.1.1] + +### Changed +- As of `future` > 1.31.0, "succeeded" is no longer recognized as a valid state for a +future. When the Civis API returns a "success" or "succeeded" status, we now map this +to a "finished" state for `future` objects. + ## [3.1.0] ### Changed diff --git a/R/civis_future.R b/R/civis_future.R index 06aab541..a4f6561e 100644 --- a/R/civis_future.R +++ b/R/civis_future.R @@ -138,7 +138,16 @@ result.CivisFuture <- function(future, ...) { tryCatch({ future$run <- await(scripts_get_containers_runs, id = future$job$containerId, run_id = future$job$id) - future$state <- future$run$state + civis_state <- future$run$state + + # `future` > v1.31.0 no longer recognizes "success"/"succeeded" as a resolved state + # See Issue #245 + if (civis_state %in% c("success", "succeeded")) { + future$state <- "finished" + } else { + future$state <- civis_state + } + value <- read_civis(civis_script(future$job$containerId), using = readRDS)[[1]] future$result <- future::FutureResult(value=value) @@ -171,10 +180,18 @@ cancel.CivisFuture <- function(future, ...) 
{ #' @describeIn CivisFuture Check if a CivisFuture has resolved resolved.CivisFuture <- function(future, ...){ if (!is.null(future$job$containerId)) { - future$state <- scripts_get_containers_runs(id = future$job$containerId, - run_id = future$job$id)$state + civis_state <- scripts_get_containers_runs(id = future$job$containerId, + run_id = future$job$id)$state + + # `future` > v1.31.0 no longer recognizes "success"/"succeeded" as a resolved state + # See Issue #245 + if (civis_state %in% c("success", "succeeded")) { + future$state <- "finished" + } else { + future$state <- civis_state + } } - future$state %in% c("succeeded", "failed", "cancelled") + future$state %in% c("finished", "failed", "cancelled") } #' @export diff --git a/tests/testthat/test_civis_future.R b/tests/testthat/test_civis_future.R index 535c0d50..86badd77 100644 --- a/tests/testthat/test_civis_future.R +++ b/tests/testthat/test_civis_future.R @@ -61,7 +61,7 @@ test_that("run and value work", { out <- capture.output(res <- mock_run(quote(2 + 3))) expect_equal(res$value, 5) expect_equal(res$fut$logs, list("a log")) - expect_equal(res$fut$state, "succeeded") + expect_equal(res$fut$state, "finished") # shouldn't need to be mocked expect_equal(value(res$fut), res$val) }) From e35cf93b7c5fa20ce2c9ac1a4d6cbcb679c59759 Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 27 Feb 2023 10:54:09 -0600 Subject: [PATCH 18/22] update cran-comments for v3.1.1 --- cran-comments.md | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/cran-comments.md b/cran-comments.md index 4f19bf44..4f22f222 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,13 +1,17 @@ ## R CMD check results -0 errors | 2 warnings | 2 notes +0 errors | 2 warnings | 1 note * This is a new release. * checking CRAN incoming feasibility ... NOTE - * New maintainer: Peter Cooman - * Old maintainer: Patrick Miller + * Maintainer: Peter Cooman + Days since last update: 5 + --> This patch update provides a more permanent fix to planned changes in an upcoming + version of the `future` package that would otherwise break our package. We do not + anticipate needing to make any other changes in the near future. + * checking whether package ‘civis’ can be installed ... WARNING * `default_credential` masks `civis::default_credential()`. * `get_database_id` masks `civis::get_database_id()`. @@ -15,18 +19,6 @@ --> the civis:: functions overwrite themselves. There are no actual conflicts. -* checking DESCRIPTION meta-information ... NOTE - * Package listed in more than one of Depends, Imports, Suggests, Enhances: - ‘future’ - - --> 'future' is listed twice in the Imports section: once with a lower version bound and a second time with an upper version boud: - ``` - Imports: - future (>= 1.8.0), - future (<= 1.31.0), - ``` - The 'future' dependency does not appear in the other sections (Depends, Suggests or Enhances), so I'm not sure why this Note appeared. Please let me know if there is a better way to specify a valid version range for a dependency. - * checking top-level files ... WARNING * A complete check needs the 'checkbashisms' script. From 390083cb83dc491e9ee60e16b4277a70bb046a46 Mon Sep 17 00:00:00 2001 From: pcooman Date: Mon, 27 Feb 2023 18:31:35 -0600 Subject: [PATCH 19/22] update NEWS.md --- NEWS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 9a2188ff..b28eac75 100644 --- a/NEWS.md +++ b/NEWS.md @@ -9,7 +9,7 @@ future. 
When the Civis API returns a "success" or "succeeded" status, we now map this to a "finished" state for `future` objects. -## [3.1.0] +## [3.1.0] - 2022-02-22 ### Changed - Deprecated the `local` argument to the `CivisFuture()` function. The `local` argument From b8a7ece639853af9eb7b811bf78b326816a827e2 Mon Sep 17 00:00:00 2001 From: pcooman Date: Thu, 30 Mar 2023 14:30:04 -0500 Subject: [PATCH 20/22] prep release --- DESCRIPTION | 2 +- NAMESPACE | 2 +- NEWS.md | 9 ++++++++- R/civis_future.R | 12 +++++++++++- cran-comments.md | 10 +--------- man/resolved.Rd | 16 ++++++++++++++++ 6 files changed, 38 insertions(+), 13 deletions(-) create mode 100644 man/resolved.Rd diff --git a/DESCRIPTION b/DESCRIPTION index 171ff3ce..ecba992f 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: civis Title: R Client for the 'Civis Platform API' -Version: 3.1.1 +Version: 3.1.2 Authors@R: c( person("Peter", "Cooman", email = "pcooman@civisanalytics.com", role = c("cre", "ctb")), person("Patrick", "Miller", email = "pmiller@civisanalytics.com", role = "aut"), diff --git a/NAMESPACE b/NAMESPACE index e308e924..cfa66c68 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -575,6 +575,7 @@ export(reports_put_services_transfer) export(reports_put_shares_groups) export(reports_put_shares_users) export(reports_put_transfer) +export(resolved) export(roles_list) export(run_civis) export(run_template) @@ -926,7 +927,6 @@ export(write_civis) export(write_civis_file) export(write_job_output) import(memoise) -importFrom(future,resolved) importFrom(future,result) importFrom(future,run) importFrom(methods,is) diff --git a/NEWS.md b/NEWS.md index b28eac75..d80de75b 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,7 +2,14 @@ ## Unreleased -## [3.1.1] +## [3.1.2] - 2022-03-30 + +### Changed +- Added a generic `resolved()` function (previously imported from the `future` package) and +changed its input argument from `x` to `future`, to stay consistent with the `resolved.CivisFuture()` +function + +## [3.1.1] - 2022-02-28 ### Changed - As of `future` > 1.31.0, "succeeded" is no longer recognized as a valid state for a diff --git a/R/civis_future.R b/R/civis_future.R index a4f6561e..8f0eec56 100644 --- a/R/civis_future.R +++ b/R/civis_future.R @@ -1,4 +1,4 @@ -#' @importFrom future run resolved result +#' @importFrom future run result NULL #' Evaluate an expression in Civis Platform @@ -127,6 +127,7 @@ run.CivisFuture <- function(future, ...) { } #' @export +#' @param future CivisFuture object. #' @describeIn CivisFuture Return the value of a CivisFuture result.CivisFuture <- function(future, ...) { if (future$state == "created") { @@ -176,7 +177,16 @@ cancel.CivisFuture <- function(future, ...) { future$state <- "cancelled" } +#' Check whether a CivisFuture has resolved. +#' @param future CivisFuture object. +#' @param ... unused for CivisFuture. #' @export +resolved <- function(future, ...) { + UseMethod("resolved") +} + +#' @export +#' @param future CivisFuture object. #' @describeIn CivisFuture Check if a CivisFuture has resolved resolved.CivisFuture <- function(future, ...){ if (!is.null(future$job$containerId)) { diff --git a/cran-comments.md b/cran-comments.md index 4f22f222..861f87e4 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,16 +1,8 @@ ## R CMD check results -0 errors | 2 warnings | 1 note +0 errors | 2 warnings | 0 notes * This is a new release. - -* checking CRAN incoming feasibility ... 
NOTE - * Maintainer: Peter Cooman - Days since last update: 5 - - --> This patch update provides a more permanent fix to planned changes in an upcoming - version of the `future` package that would otherwise break our package. We do not - anticipate needing to make any other changes in the near future. * checking whether package ‘civis’ can be installed ... WARNING * `default_credential` masks `civis::default_credential()`. diff --git a/man/resolved.Rd b/man/resolved.Rd new file mode 100644 index 00000000..439470a3 --- /dev/null +++ b/man/resolved.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/civis_future.R +\name{resolved} +\alias{resolved} +\title{Check whether a CivisFuture has resolved.} +\usage{ +resolved(future, ...) +} +\arguments{ +\item{future}{CivisFuture object.} + +\item{...}{unused for CivisFuture.} +} +\description{ +Check whether a CivisFuture has resolved. +} From ee4c42071ad1384d4bf0334be4c996e0b49399c6 Mon Sep 17 00:00:00 2001 From: Matt Brennan <52160+mattyb@users.noreply.github.com> Date: Mon, 11 Dec 2023 09:54:14 -0500 Subject: [PATCH 21/22] remove files that shouldn't be in version control --- .gitignore | 5 +- CRAN-SUBMISSION | 3 - Meta/vignette.rds | Bin 354 -> 0 bytes doc/civis_ml.R | 172 ------- doc/civis_ml.Rmd | 357 ------------- doc/civis_ml.html | 874 -------------------------------- doc/civis_scripts.R | 199 -------- doc/civis_scripts.Rmd | 382 -------------- doc/civis_scripts.html | 697 ------------------------- doc/concurrency.R | 86 ---- doc/concurrency.Rmd | 157 ------ doc/concurrency.html | 499 ------------------ doc/data_import_and_export.R | 81 --- doc/data_import_and_export.Rmd | 240 --------- doc/data_import_and_export.html | 551 -------------------- doc/quick_start.R | 57 --- doc/quick_start.Rmd | 103 ---- doc/quick_start.html | 441 ---------------- 18 files changed, 3 insertions(+), 4901 deletions(-) delete mode 100644 CRAN-SUBMISSION delete mode 100644 Meta/vignette.rds delete mode 100644 doc/civis_ml.R delete mode 100644 doc/civis_ml.Rmd delete mode 100644 doc/civis_ml.html delete mode 100644 doc/civis_scripts.R delete mode 100644 doc/civis_scripts.Rmd delete mode 100644 doc/civis_scripts.html delete mode 100644 doc/concurrency.R delete mode 100644 doc/concurrency.Rmd delete mode 100644 doc/concurrency.html delete mode 100644 doc/data_import_and_export.R delete mode 100644 doc/data_import_and_export.Rmd delete mode 100644 doc/data_import_and_export.html delete mode 100644 doc/quick_start.R delete mode 100644 doc/quick_start.Rmd delete mode 100644 doc/quick_start.html diff --git a/.gitignore b/.gitignore index 214a6450..18ed2900 100644 --- a/.gitignore +++ b/.gitignore @@ -10,5 +10,6 @@ inst/web .*.swo .DS_Store civis.Rcheck/ -/doc/ -/Meta/ +doc/ +Meta/ +CRAN-SUBMISSION diff --git a/CRAN-SUBMISSION b/CRAN-SUBMISSION deleted file mode 100644 index 13d80d3b..00000000 --- a/CRAN-SUBMISSION +++ /dev/null @@ -1,3 +0,0 @@ -Version: 3.1.0 -Date: 2023-02-14 21:28:04 UTC -SHA: b389b98a9c1914f99f9d68509f62a43fd907b5d5 diff --git a/Meta/vignette.rds b/Meta/vignette.rds deleted file mode 100644 index 2280d0cfdfc22b24d973491caddd96d567295ce5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 354 zcmV-o0iFIIiwFP!000001C>+3PQx$^&Dw5k6BC0SKyaz}fc*nv8bTbH7zb`ul!i5; zO-7PZ%8CC*Vz_Hk(n*ux5XXM8J1T=zHb5g|9gyFOkyNz-*P_>dc%b>zuT-V&*TOK!5 z>*~T~#f0wi7&T%fx*>DYR>L_NyK*@A#U4G}Qdf{I-exlV9ZREqdngTMcO(Wp+cywf z)EWl>lWpso0{X-L&-}mx(=pAv9A{LS!=3?yIZLTccx1W>{Kaf;+>WJxFr#bAWBUgB 
zceE^ogt*;kAVo2?%|_K<_yn`rY`%6GkrXS-$b!R4!|5};CkoN`58c8s8%+ZM0GkM? Al>h($ diff --git a/doc/civis_ml.R b/doc/civis_ml.R deleted file mode 100644 index 567742b0..00000000 --- a/doc/civis_ml.R +++ /dev/null @@ -1,172 +0,0 @@ -## ---- eval = FALSE------------------------------------------------------------ -# library(civis) -# -# civis_ml(df, ...) -# civis_ml("path/to/data.csv", ...) -# civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...) -# civis_ml(civis_file(1234), ...) - -## ---- eval = FALSE----------------------------------------------------------- -# options(civis.default_db = "my_database") -# tab <- civis_table(table_name = "sample_project.premium_training_set") - -## ---- eval = FALSE------------------------------------------------------------ -# library(civis) -# tab <- civis_table("sample_project.premium_training_set") -# m <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_forest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip") -# -# m <- civis_ml_random_forest_classifier(tab, -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip") - -## ---- eval = FALSE------------------------------------------------------------ -# tab <- civis_table("sample_project.premium_training_set") -# -# # hyperband -# m_hyper <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_forest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip", -# cross_validation_parameters = 'hyperband') -# -# # grid search -# cv_params <- list("max_depth" = c(2, 3, 5), -# "n_estimators" = c(50, 100, 500)) -# -# m_grid <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_forest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip", -# cross_validation_parameters = cv_params) -# -# - -## ---- eval = FALSE------------------------------------------------------------ -# m_stack <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "stacking_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip") - -## ---- eval=FALSE-------------------------------------------------------------- -# m - -## ----run_model, eval=FALSE, echo=FALSE---------------------------------------- -# # use this chunk to actually update the model if necessary -# library(civis) -# tab <- civis_table("sample_project.premium_training_set") -# cv_params <- list("max_depth" = c(2, 3, 5), -# "n_estimators" = c(50, 100, 500)) -# -# -# m <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_forest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip", -# cross_validation_parameters = cv_params) -# saveRDS(m, file = "../inst/civis_ml_brandable.rds") -# -# oos <- fetch_oos_scores(m) -# saveRDS(oos, file = "../inst/civis_ml_oos.rds") -# -# err_m <- tryCatch({ -# civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_fest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip", -# cross_validation_parameters = cv_params) -# }, error = function(e) e) -# saveRDS(err_m, file = "../inst/civis_ml_err.rds") -# - -## ---- eval=TRUE, echo=FALSE--------------------------------------------------- -library(civis) -path <- system.file("civis_ml_brandable.rds", package = 'civis') -m <- readRDS(path) -m - -## ----------------------------------------------------------------------------- -get_metric(m, 
"accuracy") -get_metric(m, "confusion_matrix") -get_metric(m, "roc_auc") - -## ---- echo=TRUE, eval=FALSE--------------------------------------------------- -# oos <- fetch_oos_scores(m) -# head(oos) - -## ---- echo=FALSE, eval=TRUE--------------------------------------------------- -path <- system.file("civis_ml_oos.rds", package = 'civis') -oos <- readRDS(path) -head(oos) - -## ---- fig.width = 5----------------------------------------------------------- -plot(m) - -## ----------------------------------------------------------------------------- -hist(m) - -## ---- eval=FALSE-------------------------------------------------------------- -# pred_tab <- civis_table(table_name = "sample_project.brandable_all_users") -# pred_job <- predict(m, newdata = pred_tab, -# output_table = "sample_project.brandable_user_scores") - -## ---- eval=FALSE-------------------------------------------------------------- -# pred_job <- predict(m, newdata = pred_tab, -# output_table = "sample_project.brandable_user_scores", -# n_jobs = 25) - -## ---- eval=FALSE-------------------------------------------------------------- -# yhat <- fetch_predictions(pred_job) - -## ---- eval=FALSE-------------------------------------------------------------- -# # download from S3 -# download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv") -# -# # download from Redshift -# download_civis("sample_project.brandable_user_scores") - -## ---- eval=FALSE-------------------------------------------------------------- -# model_id <- m$job$id -# m <- civis_ml_fetch_existing(model_id) - -## ---- eval=FALSE-------------------------------------------------------------- -# civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_fest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip", -# cross_validation_parameters = cv_params) - -## ---- echo=FALSE, eval=TRUE--------------------------------------------------- -path <- system.file("civis_ml_err.rds", package = 'civis') -err <- readRDS(path) -err - -## ---- eval = FALSE------------------------------------------------------------ -# e <- tryCatch({ -# civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_fest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip") -# }, civis_ml_error = function(e) e) -# get_error(e) -# fetch_logs(e) - -## ---- eval=FALSE-------------------------------------------------------------- -# retry_model <- function(max_retries = 5) { -# i <- 1 -# while (i < max_retries) { -# tryCatch({ -# m <- civis_ml(tab, dependent_variable = "upgrade", -# model_type = "random_forest_classifier", -# primary_key = "brandable_user_id", -# excluded_columns = "residential_zip") -# return(m) -# }, civis_ml_error = function(e) stop(e)) -# cat("Retry: ", i, fill = TRUE) -# i <- i + 1 -# } -# stop("Exceeded maximum retries.") -# } - diff --git a/doc/civis_ml.Rmd b/doc/civis_ml.Rmd deleted file mode 100644 index 7f85b511..00000000 --- a/doc/civis_ml.Rmd +++ /dev/null @@ -1,357 +0,0 @@ ---- -title: "Machine Learning in R with CivisML" -author: "Patrick Miller and Liz Sander" -date: "2018-1-18" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{civis_ml} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -There are so many models to build! When this becomes challenging on a local machine, offloading model building to the cloud can save a lot of time and effort. 
- -[CivisML](https://medium.com/civis-analytics/civisml-scikit-learn-at-scale-b01b496916ea) is a machine learning service on Civis Platform that makes this as painless as possible. You can fit many different models, do extensive hyperparameter tuning, and score data sets with millions of observations stored in remote databases. Once these models are built, they live in Civis Platform permanently and can be included into production pipelines. Results can be easily incorporated into reports and dashboards. - -CivisML is built in Python using [scikit-learn](https://scikit-learn.org/stable/), and leverages AWS behind the scenes for efficient distributed computing. However, most of its features can be used through R without knowledge of Python or AWS with the `civis_ml` function in `civis`. - -While `civis_ml` is a complex function with many arguments, basic machine learning modeling and scoring can be easily carried out. We illustrate several features of `civis_ml` with data from a fictitious company called Brandable, who is looking to predict which customers are likely to upgrade from the free to the premium service. - -## Data sources - -The first step of modeling with `civis_ml` is to specify the data source, which is the first argument. `civis_ml` works with local data frames, a CSV on local disk, [feather-format](https://github.com/wesm/feather) files, tables in Redshift, and files on S3 (the files endpoint): - -```{r, eval = FALSE} -library(civis) - -civis_ml(df, ...) -civis_ml("path/to/data.csv", ...) -civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...) -civis_ml(civis_file(1234), ...) -``` - -The Brandable data is located in a Redshift table called `sample_project.premium_training_set`. - -```{r, eval = FALSE} -options(civis.default_db = "my_database") -tab <- civis_table(table_name = "sample_project.premium_training_set") -``` - -Note that `civis_table` only returns information on where to find the data for `civis_ml`, not the data itself. `civis_table` also takes two SQL statements that can be useful for limiting the rows used for training: `sql_where`, and `sql_limit`. - - -## Modeling - -After the data source is specified, we next choose the model type. There are 13 named CivisML models that can be called from `civis_ml`, 6 for classification and 7 for regression. The name of the model corresponds to the name of the estimator in scikit-learn. It can be given in the `model_type` argument of `civis_ml`, or called directly using a `civis_ml_*` function such as `civis_ml_sparse_logistic`. 
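As a minimal sketch (assuming the `tab` object defined above), the two interfaces are equivalent ways of requesting the same estimator:

```r
# The same sparse logistic model, requested through either interface
m1 <- civis_ml(tab, dependent_variable = "upgrade", model_type = "sparse_logistic")
m2 <- civis_ml_sparse_logistic(tab, dependent_variable = "upgrade")
```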
- -| Name | R Workflow | Model Type | scikit-learn Documentation | -|------|:-----------|------------|-----------|------------------| - `sparse_logistic` | `civis_ml_sparse_logistic` | classification | [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) | - `gradient_boosting_classifier` | `civis_ml_gradient_boosting_classifier` | classification | [GradientBoostingClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) | - `random_forest_classifier` | `civis_ml_random_forest_classifier` | classification | [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) | - `extra_trees_classifier` | `civis_ml_extra_trees_classifier` | classification | [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html)| - `multilayer_perceptron_classifier` | | classification | [muffnn.MLPClassifier](https://github.com/civisanalytics/muffnn) | - `stacking_classifier` | | classification | [StackedClassifier](https://github.com/civisanalytics/civisml-extensions) - `sparse_linear_regressor` | `civis_ml_sparse_linear_regressor` | regression | [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) | - `sparse_ridge_regressor` | `civis_ml_sparse_ridge_regressor` | regression | [Ridge](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html) | - `gradient_boosting_regressor` | `civis_ml_gradient_boosting_regressor` | regression | [GradientBoostingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) | - `random_forest_regressor` | `civis_ml_random_forest_regressor` | regression | [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html)| - `extra_trees_regressor` | `civis_ml_extra_trees_regressor` | regression | [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html)| -`multilayer_perceptron_regressor` | | regression | [muffnn.MLPRegressor](https://github.com/civisanalytics/muffnn) | -`stacking_regressor` | | regression | [StackedRegressor](https://github.com/civisanalytics/civisml-extensions) - -Documentation on the meta parameters specific to each estimator are provided in `?civis_ml_*`. For example, the regularization strength parameter `C` of `sparse_logistic` is documented in `?civis_ml_sparse_logistic`. - -For the Brandable data, we use a `random_forest` classifier to predict the probability that a customer upgrades from free to premium services. For efficiency, we can also denote a `primary_key`, and a set of `excluded_columns` that are not included in the model: - -```{r, eval = FALSE} -library(civis) -tab <- civis_table("sample_project.premium_training_set") -m <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_forest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip") - -m <- civis_ml_random_forest_classifier(tab, - primary_key = "brandable_user_id", - excluded_columns = "residential_zip") -``` - -Note that if the dependent variables have null values, those rows will be removed before modeling. - -### Hyperparameter Tuning - -You can tune hyperparameters using one of two methods: grid search or hyperband. 
CivisML will perform grid search if you pass a named list of hyperparameters and candidate values to `cross_validation_parameters`. By default, hyperparameter tuning will run in parallel, using as many jobs as possible without overloading your computing cluster. If you wish to have more control over the number of jobs running at once, you can set it using the `n_jobs` parameter. - -[Hyperband](https://arxiv.org/abs/1603.06560) is an efficient approach to hyperparameter optimization, and recommended over grid search where possible. CivisML will perform hyperband optimization if you pass the string `"hyperband"` to `cross_validation_parameters`. Hyperband cannot be used to tune GLMs. For this reason, preset GLMs do not have a hyperband option. Hyperband is supported for random forests, gradient boosted trees, extra trees, multilayer perceptrons, and the random forest and gradient boosted tree steps of stacking. It is highly recommended that multilayer perceptron models only be used with hyperband. - -For the `random_forest_classifier` in the Brandable data, we try both `"hyperband"` and grid search for hyperparameter optimization. - -```{r, eval = FALSE} -tab <- civis_table("sample_project.premium_training_set") - -# hyperband -m_hyper <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_forest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip", - cross_validation_parameters = 'hyperband') - -# grid search -cv_params <- list("max_depth" = c(2, 3, 5), - "n_estimators" = c(50, 100, 500)) - -m_grid <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_forest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip", - cross_validation_parameters = cv_params) - - -``` - -CivisML runs pre-defined models with hyperband using the following distributions: - - -| Models | Cost Parameter | Hyperband Distributions | -| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- | -| gradient_boosting_classifier
gradient_boosting_regressor
GBT step in stacking_classifier
GBT step in stacking_regressor | `n_estimators`
`min = 100,`
`max = 1000` | `max_depth: randint(low=1, high=5)`
`max_features: [None, 'sqrt', 'log2', 0.5, 0.3, 0.1, 0.05, 0.01]`
`learning_rate: truncexpon(b=5, loc=.0003, scale=1./167.)` | -| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- | -| random_forest_classifier
random_forest_regressor
extra_trees_classifier
extra_trees_regressor
RF step in stacking_classifier
RF step in stacking_regressor | `n_estimators`
`min = 100,`
`max = 1000` | `criterion: ['gini', 'entropy']`
`max_features: truncexpon(b=10., loc=.01, scale=1./10.11)`
`max_depth: [1, 2, 3, 4, 6, 10, None]` | -| ---------------------------------- | ------------------ | --------------------------------------------------------------------------- | -| multilayer_perceptron_classifier
multilayer_perceptron_regressor | `n_epochs`
`min = 5,`
`max = 50` | `keep_prob: uniform()`
``hidden_units: [(), (16,), (32,), (64,), (64, 64), (64, 64, 64),`
`(128,), (128, 128), (128, 128, 128), (256,),`
`(256, 256), (256, 256, 256), (512, 256, 128, 64),`
`(1024, 512, 256, 128)]`
`learning_rate: [1e-2, 2e-2, 5e-2, 8e-2, 1e-3, 2e-3, 5e-3, 8e-3, 1e-4]` | - -The truncated exponential distribution for the gradient boosting -classifier and regressor was chosen to skew the distribution toward -small values, ranging between .0003 and .03, with a mean close to -.006. Similarly, the truncated exponential distribution for the random -forest and extra trees models skews toward small values, ranging -between .01 and 1, and with a mean close to .1. - -### Stacking - -The `"stacking_classifier"` model stacks together the `"gradient_boosting_classifier"` and `"random_forest_classifier"` predefined models together with a -`glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, -scoring='log_loss')`. Defaults for the predefined models are documented in `?civis_ml`. Each column is first [standardized](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html), and then the model predictions are combined using [LogisticRegressionCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) with `penalty='l2'` and `tol=1e-08`. The `"stacking_regressor"` works similarly, stacking together the `"gradient_boosting_regressor"` and `"random_forest_regressor"` models and a `glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, -tol=1e-5, scoring='r2')`, combining them using [NonNegativeLinearRegression](https://github.com/civisanalytics/civisml-extensions). - -```{r, eval = FALSE} -m_stack <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "stacking_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip") -``` - -## Results - -A simple summary of the results from the best fitting model is provided with `print`: - -```{r, eval=FALSE} -m -``` - - -```{r run_model, eval=FALSE, echo=FALSE} -# use this chunk to actually update the model if necessary -library(civis) -tab <- civis_table("sample_project.premium_training_set") -cv_params <- list("max_depth" = c(2, 3, 5), - "n_estimators" = c(50, 100, 500)) - - -m <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_forest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip", - cross_validation_parameters = cv_params) -saveRDS(m, file = "../inst/civis_ml_brandable.rds") - -oos <- fetch_oos_scores(m) -saveRDS(oos, file = "../inst/civis_ml_oos.rds") - -err_m <- tryCatch({ - civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_fest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip", - cross_validation_parameters = cv_params) - }, error = function(e) e) -saveRDS(err_m, file = "../inst/civis_ml_err.rds") - -``` - -```{r, eval=TRUE, echo=FALSE} -library(civis) -path <- system.file("civis_ml_brandable.rds", package = 'civis') -m <- readRDS(path) -m -``` -Following the link takes you to a summary of the model results in Civis Platform. Additional metrics can be computed with `get_metric`: - -```{r} -get_metric(m, "accuracy") -get_metric(m, "confusion_matrix") -get_metric(m, "roc_auc") -``` - -Out of sample (or out of fold) scores used in training can be retrieved using `fetch_oos_scores`: - -```{r, echo=TRUE, eval=FALSE} -oos <- fetch_oos_scores(m) -head(oos) -``` - -```{r, echo=FALSE, eval=TRUE} -path <- system.file("civis_ml_oos.rds", package = 'civis') -oos <- readRDS(path) -head(oos) -``` - -## Diagnostics - -For classification problems, `plot` produces a a decile plot using `ggplot2`. 
For the premium upgrade model, the decile plot shows that the top-scoring 10\% of individuals contain 2.20 times as many targets (people who upgraded) as a randomly selected list of the same size. - -```{r, fig.width = 5} -plot(m) -``` - -For regression problems, `plot` produces a binned scatter-plot of $y$ against $\hat{y}$. - -`hist` shows the histogram of out of sample (out of fold scores), also using `ggplot2`: - -```{r} -hist(m) -``` - - -## Prediction and Scoring - -CivisML can also be used to score models on hundreds of millions of rows, and distributed over many compute instances. Like many estimators in R, this is done through a `predict` method. The `newdata` argument of `predict` can take any data source supported in `civis_ml`. Here we use a table in Redshift containing all Brandable users, and output the result to another table in Redshift: - -```{r, eval=FALSE} -pred_tab <- civis_table(table_name = "sample_project.brandable_all_users") -pred_job <- predict(m, newdata = pred_tab, - output_table = "sample_project.brandable_user_scores") -``` - -Like training and validation, scoring is distributed by default, using up to 90 percent of your computing cluster resources. If you would like to have more control over the number of jobs that are run at once, you can set a maximum using `n_jobs`: - -```{r, eval=FALSE} -pred_job <- predict(m, newdata = pred_tab, - output_table = "sample_project.brandable_user_scores", - n_jobs = 25) -``` - -The predictions can be loaded into memory using `fetch_predictions`, which downloads directly from S3: - -```{r, eval=FALSE} -yhat <- fetch_predictions(pred_job) -``` - -Note that if the table of predictions exceeds available memory, it may be helpful to use `download_civis` instead. - -```{r, eval=FALSE} -# download from S3 -download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv") - -# download from Redshift -download_civis("sample_project.brandable_user_scores") -``` - -## Retrieving Existing models - -An existing model (or particular run of an existing model) can be retrieved using `civis_ml_fetch_existing`: - -```{r, eval=FALSE} -model_id <- m$job$id -m <- civis_ml_fetch_existing(model_id) -``` - -## Error Handling - -Unfortunately, many kinds of errors can occur. When an error occurs within CivisML, a `civis_ml_error` is thrown. By default, the log from the CivisML job is printed, which is useful for debugging. - -Here is an example error from misspelling the model type: - -```{r, eval=FALSE} -civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_fest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip", - cross_validation_parameters = cv_params) -``` - -```{r, echo=FALSE, eval=TRUE} -path <- system.file("civis_ml_err.rds", package = 'civis') -err <- readRDS(path) -err -``` - -If you don't understand the error message, providing the error message, job, and run ids to support is the best way to get help! - -## Programming with `civis_ml` - -When programming with `civis_ml`, errors can be caught using the base R `try` or `tryCatch`. In `civis`, we provide functions for getting debugging information using `get_error` or just the logs using `fetch_logs`. 
- -```{r, eval = FALSE} -e <- tryCatch({ - civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_fest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip") - }, civis_ml_error = function(e) e) -get_error(e) -fetch_logs(e) -``` - -Error handling can be used to implement more robust workflow programming with `civis_ml`. In the following function, we implement `retry_model`, which retries on e.g. connection failures but not on a `civis_ml_error`. - -```{r, eval=FALSE} -retry_model <- function(max_retries = 5) { - i <- 1 - while (i < max_retries) { - tryCatch({ - m <- civis_ml(tab, dependent_variable = "upgrade", - model_type = "random_forest_classifier", - primary_key = "brandable_user_id", - excluded_columns = "residential_zip") - return(m) - }, civis_ml_error = function(e) stop(e)) - cat("Retry: ", i, fill = TRUE) - i <- i + 1 - } - stop("Exceeded maximum retries.") -} -``` - -Workflow programming could be further enhanced by printing the logs, storing the error object, or writing error logs to a file or database. - -## Appendix - -### Parallelization - -To fit many models in parallel using `parallel`, `foreach`, or `future`, check out [this article](https://civisanalytics.github.io/civis-r/articles/concurrency.html) or the vignette on concurrency at `browseVignettes("civis")`. - -### Sample weights - -Many estimators take a `sample_weight` argument. This can be be specified with the `fit_params` argument of `civis_ml` using `list(sample_weight = 'survey_weight_column')`. - -### Missing data - -Modeling data must be complete. Any missing values will be imputed with the mean of non-null values in a column. - -### CivisML Versions - -By default, CivisML uses its latest version in production. -If you would like a specific version -(e.g., for a production pipeline where pinning the CivisML version is desirable), -both `civis_ml` and the `civis_ml_*` functions have the optional parameter -``civisml_version`` that accepts a string, e.g., ``'v2.3'`` -for CivisML v2.3. Please see [here](https://civis.zendesk.com/hc/en-us/articles/360000260011-CivisML) for the list of CivisML versions. - -### More information - -Custom estimators can be written in Python and included in CivisML if they follow the scikit-learn API. For example, the `sparse_logistic`, `sparse_linear_regressor`, and `sparse_ridge_regressor` models all use the public Civis Analytics [glmnet](https://github.com/civisanalytics/python-glmnet) wrapper in Python. - -Browse [the CivisML documentation](https://civis-python.readthedocs.io/en/stable/ml.html) for more details! - diff --git a/doc/civis_ml.html b/doc/civis_ml.html deleted file mode 100644 index 83faf6b3..00000000 --- a/doc/civis_ml.html +++ /dev/null @@ -1,874 +0,0 @@ - - - - - - - - - - - - - - - -Machine Learning in R with CivisML - - - - - - - - - - - - - - - - - - - - - - - - - - -

Machine Learning in R with CivisML

-

Patrick Miller and Liz Sander

-

2018-1-18

- - - -

There are so many models to build! When this becomes challenging on a -local machine, offloading model building to the cloud can save a lot of -time and effort.

-

CivisML -is a machine learning service on Civis Platform that makes this as -painless as possible. You can fit many different models, do extensive -hyperparameter tuning, and score data sets with millions of observations -stored in remote databases. Once these models are built, they live in -Civis Platform permanently and can be included into production -pipelines. Results can be easily incorporated into reports and -dashboards.

-

CivisML is built in Python using scikit-learn, and leverages -AWS behind the scenes for efficient distributed computing. However, most -of its features can be used through R without knowledge of Python or AWS -with the civis_ml function in civis.

-

While civis_ml is a complex function with many -arguments, basic machine learning modeling and scoring can be easily -carried out. We illustrate several features of civis_ml -with data from a fictitious company called Brandable, who is looking to -predict which customers are likely to upgrade from the free to the -premium service.

-
-

Data sources

-

The first step of modeling with civis_ml is to specify -the data source, which is the first argument. civis_ml -works with local data frames, a CSV on local disk, feather-format files, tables -in Redshift, and files on S3 (the files endpoint):

-
library(civis)
-
-civis_ml(df, ...)
-civis_ml("path/to/data.csv", ...)
-civis_ml(civis_table(table_name = "schema.table", database_name = "database"), ...)
-civis_ml(civis_file(1234), ...)
-

The Brandable data is located in a Redshift table called -sample_project.premium_training_set.

-
options(civis.default_db = "my_database")
-tab <- civis_table(table_name = "sample_project.premium_training_set")
-

Note that civis_table only returns information on where -to find the data for civis_ml, not the data itself. -civis_table also takes two SQL statements that can be -useful for limiting the rows used for training: sql_where, -and sql_limit.

-
-
-

Modeling

-

After the data source is specified, we next choose the model type. -There are 13 named CivisML models that can be called from -civis_ml, 6 for classification and 7 for regression. The -name of the model corresponds to the name of the estimator in -scikit-learn. It can be given in the model_type argument of -civis_ml, or called directly using a -civis_ml_* function such as -civis_ml_sparse_logistic.

| Name | R Workflow | Model Type | scikit-learn Documentation |
|------|------------|------------|-----------------------------|
| sparse_logistic | civis_ml_sparse_logistic | classification | LogisticRegression |
| gradient_boosting_classifier | civis_ml_gradient_boosting_classifier | classification | GradientBoostingClassifier |
| random_forest_classifier | civis_ml_random_forest_classifier | classification | RandomForestClassifier |
| extra_trees_classifier | civis_ml_extra_trees_classifier | classification | ExtraTreesClassifier |
| multilayer_perceptron_classifier | | classification | muffnn.MLPClassifier |
| stacking_classifier | | classification | StackedClassifier |
| sparse_linear_regressor | civis_ml_sparse_linear_regressor | regression | LinearRegression |
| sparse_ridge_regressor | civis_ml_sparse_ridge_regressor | regression | Ridge |
| gradient_boosting_regressor | civis_ml_gradient_boosting_regressor | regression | GradientBoostingRegressor |
| random_forest_regressor | civis_ml_random_forest_regressor | regression | RandomForestRegressor |
| extra_trees_regressor | civis_ml_extra_trees_regressor | regression | ExtraTreesRegressor |
| multilayer_perceptron_regressor | | regression | muffnn.MLPRegressor |
| stacking_regressor | | regression | StackedRegressor |
-

Documentation on the meta parameters specific to each estimator is provided in ?civis_ml_*. For example, the regularization strength parameter C of sparse_logistic is documented in ?civis_ml_sparse_logistic.

-
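For instance, a sketch of passing C directly (the value is illustrative, and assumes the estimator exposes it as an argument):

```{r, eval = FALSE}
m_logit <- civis_ml_sparse_logistic(tab, dependent_variable = "upgrade",
                                    primary_key = "brandable_user_id",
                                    C = 0.1)  # illustrative regularization strength
```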

For the Brandable data, we use a random_forest -classifier to predict the probability that a customer upgrades from free -to premium services. For efficiency, we can also denote a -primary_key, and a set of excluded_columns -that are not included in the model:

-
library(civis)
-tab <- civis_table("sample_project.premium_training_set")
-m   <- civis_ml(tab, dependent_variable = "upgrade",
-                model_type = "random_forest_classifier",
-                primary_key = "brandable_user_id",
-                excluded_columns = "residential_zip")
-
-m <- civis_ml_random_forest_classifier(tab,
-      primary_key = "brandable_user_id",
-      excluded_columns = "residential_zip")
-

Note that if the dependent variables have null values, those rows -will be removed before modeling.

-
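Rows with a null dependent variable can also be filtered out in the data source itself, for example with sql_where (a sketch):

```{r, eval = FALSE}
tab <- civis_table(table_name = "sample_project.premium_training_set",
                   sql_where = "upgrade IS NOT NULL")
```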
-

Hyperparameter Tuning

-

You can tune hyperparameters using one of two methods: grid search or -hyperband. CivisML will perform grid search if you pass a named list of -hyperparameters and candidate values to -cross_validation_parameters. By default, hyperparameter -tuning will run in parallel, using as many jobs as possible without -overloading your computing cluster. If you wish to have more control -over the number of jobs running at once, you can set it using the -n_jobs parameter.

-

Hyperband is an -efficient approach to hyperparameter optimization, and recommended over -grid search where possible. CivisML will perform hyperband optimization -if you pass the string "hyperband" to -cross_validation_parameters. Hyperband cannot be used to -tune GLMs. For this reason, preset GLMs do not have a hyperband option. -Hyperband is supported for random forests, gradient boosted trees, extra -trees, multilayer perceptrons, and the random forest and gradient -boosted tree steps of stacking. It is highly recommended that multilayer -perceptron models only be used with hyperband.

-

For the random_forest_classifier in the Brandable data, -we try both "hyperband" and grid search for hyperparameter -optimization.

-
tab <- civis_table("sample_project.premium_training_set")
-
-# hyperband
-m_hyper <- civis_ml(tab, dependent_variable = "upgrade",
-              model_type = "random_forest_classifier",
-              primary_key = "brandable_user_id",
-              excluded_columns = "residential_zip",
-              cross_validation_parameters = 'hyperband')
-
-# grid search
-cv_params <- list("max_depth" = c(2, 3, 5),
-                  "n_estimators" = c(50, 100, 500))
-
-m_grid <- civis_ml(tab, dependent_variable = "upgrade",
-              model_type = "random_forest_classifier",
-              primary_key = "brandable_user_id",
-              excluded_columns = "residential_zip",
-              cross_validation_parameters = cv_params)
-
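To cap the number of concurrent tuning jobs mentioned above, n_jobs can be passed as well. A sketch (the cap of 5 is illustrative):

```{r, eval = FALSE}
m_grid <- civis_ml(tab, dependent_variable = "upgrade",
                   model_type = "random_forest_classifier",
                   primary_key = "brandable_user_id",
                   excluded_columns = "residential_zip",
                   cross_validation_parameters = cv_params,
                   n_jobs = 5)  # illustrative cap on concurrent jobs
```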

CivisML runs pre-defined models with hyperband using the following -distributions:

| Models | Cost Parameter | Hyperband Distributions |
|--------|----------------|-------------------------|
| gradient_boosting_classifier, gradient_boosting_regressor, GBT step in stacking_classifier, GBT step in stacking_regressor | n_estimators (min = 100, max = 1000) | max_depth: randint(low=1, high=5); max_features: [None, 'sqrt', 'log2', 0.5, 0.3, 0.1, 0.05, 0.01]; learning_rate: truncexpon(b=5, loc=.0003, scale=1./167.) |
| random_forest_classifier, random_forest_regressor, extra_trees_classifier, extra_trees_regressor, RF step in stacking_classifier, RF step in stacking_regressor | n_estimators (min = 100, max = 1000) | criterion: ['gini', 'entropy']; max_features: truncexpon(b=10., loc=.01, scale=1./10.11); max_depth: [1, 2, 3, 4, 6, 10, None] |
| multilayer_perceptron_classifier, multilayer_perceptron_regressor | n_epochs (min = 5, max = 50) | keep_prob: uniform(); hidden_units: [(), (16,), (32,), (64,), (64, 64), (64, 64, 64), (128,), (128, 128), (128, 128, 128), (256,), (256, 256), (256, 256, 256), (512, 256, 128, 64), (1024, 512, 256, 128)]; learning_rate: [1e-2, 2e-2, 5e-2, 8e-2, 1e-3, 2e-3, 5e-3, 8e-3, 1e-4] |
-

The truncated exponential distribution for the gradient boosting -classifier and regressor was chosen to skew the distribution toward -small values, ranging between .0003 and .03, with a mean close to .006. -Similarly, the truncated exponential distribution for the random forest -and extra trees models skews toward small values, ranging between .01 -and 1, and with a mean close to .1.

-
-
-

Stacking

-

The "stacking_classifier" model stacks the "gradient_boosting_classifier" and "random_forest_classifier" predefined models together with a glmnet.LogitNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='log_loss'). Defaults for the predefined models are documented in ?civis_ml. Each column is first standardized, and then the model predictions are combined using LogisticRegressionCV with penalty='l2' and tol=1e-08. The "stacking_regressor" works similarly, stacking the "gradient_boosting_regressor" and "random_forest_regressor" models together with a glmnet.ElasticNet(alpha=0, n_splits=4, max_iter=10000, tol=1e-5, scoring='r2'), and combining them using NonNegativeLinearRegression.

-
m_stack <- civis_ml(tab, dependent_variable = "upgrade",
-              model_type = "stacking_classifier",
-              primary_key = "brandable_user_id",
-              excluded_columns = "residential_zip")
-
-
-
-

Results

-

A simple summary of the results from the best fitting model is -provided with print:

-
m
-
## <CivisML random_forest_classifier>
-## https://platform.civisanalytics.com/#/models/7072485
-## Job id:  7072485  Run id:  58251183 
-## 
-## AUC:  0.8009
-## upgrade:
-##                  0      1
-## Prop Correct 0.923 0.3297
-

Following the link takes you to a summary of the model results in -Civis Platform. Additional metrics can be computed with -get_metric:

-
get_metric(m, "accuracy")
-
## [1] 0.761
-
get_metric(m, "confusion_matrix")
-
##      [,1] [,2]
-## [1,]  671   56
-## [2,]  183   90
-
get_metric(m, "roc_auc")
-
## [1] 0.8009004
-

Out of sample (or out of fold) scores used in training can be -retrieved using fetch_oos_scores:

-
oos <- fetch_oos_scores(m)
-head(oos)
-
##   brandable_user_id upgrade_1
-## 1   00214b9181f2347    0.3280
-## 2   0b6cbd77cb8d98b    0.7140
-## 3   12ca082b063a3bf    0.4480
-## 4   130060adea791e8    0.2060
-## 5   1495366621d3834    0.3152
-## 6   1a8ed19916ae7c2    0.1600
-
-
-

Diagnostics

-

For classification problems, plot produces a decile plot using ggplot2. For the premium upgrade model, the decile plot shows that the top-scoring 10% of individuals contain 2.20 times as many targets (people who upgraded) as a randomly selected list of the same size.

-
plot(m)
-

-

For regression problems, plot produces a binned -scatter-plot of \(y\) against \(\hat{y}\).

-

hist shows the histogram of out of sample (out of fold -scores), also using ggplot2:

-
hist(m)
-

-
-
-

Prediction and Scoring

-

CivisML can also be used to score models on hundreds of millions of rows, with scoring distributed over many compute instances. Like many estimators in R, this is done through a predict method. The newdata argument of predict can take any data source supported in civis_ml. Here we use a table in Redshift containing all Brandable users, and output the result to another table in Redshift:

-
pred_tab <- civis_table(table_name = "sample_project.brandable_all_users")
-pred_job <- predict(m, newdata = pred_tab,
-                    output_table = "sample_project.brandable_user_scores")
-

Like training and validation, scoring is distributed by default, -using up to 90 percent of your computing cluster resources. If you would -like to have more control over the number of jobs that are run at once, -you can set a maximum using n_jobs:

-
pred_job <- predict(m, newdata = pred_tab,
-                    output_table = "sample_project.brandable_user_scores",
-                    n_jobs = 25)
-

The predictions can be loaded into memory using -fetch_predictions, which downloads directly from S3:

-
yhat <- fetch_predictions(pred_job)
-

Note that if the table of predictions exceeds available memory, it -may be helpful to use download_civis instead.

-
# download from S3
-download_civis(pred_job$model_info$output_file_ids, path = "my_predictions.csv")
-
-# download from Redshift
-download_civis("sample_project.brandable_user_scores")
-
-
-

Retrieving Existing Models

-

An existing model (or particular run of an existing model) can be -retrieved using civis_ml_fetch_existing:

-
model_id <- m$job$id
-m <- civis_ml_fetch_existing(model_id)
-
-
-

Error Handling

-

Unfortunately, many kinds of errors can occur. When an error occurs -within CivisML, a civis_ml_error is thrown. By default, the -log from the CivisML job is printed, which is useful for debugging.

-

Here is an example error from misspelling the model type:

-
civis_ml(tab, dependent_variable = "upgrade",
-         model_type = "random_fest_classifier",
-         primary_key = "brandable_user_id",
-         excluded_columns = "residential_zip",
-         cross_validation_parameters = cv_params)
-
## <civis_ml_error>
-## scripts_get_custom_runs(id = 7077157, run_id = 58263925): 
-## 2017-08-29 13:01:54 PM CDT Queued
-## 2017-08-29 13:01:55 PM CDT Running
-## 2017-08-29 13:01:57 PM CDT Dedicating resources
-## 2017-08-29 13:01:58 PM CDT Downloading code and container
-## 2017-08-29 13:01:59 PM CDT Executing script
-## 2017-08-29 13:02:03 PM CDT Please select one of the pre-defined models: ['sparse_logistic', 'sparse_linear_regressor', 'sparse_ridge_regressor', 'gradient_boosting_classifier', 'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_regressor', 'random_forest_regressor', 'extra_trees_regressor']
-## 2017-08-29 13:02:05 PM CDT Process used approximately 97.57 MiB of its 3188 MiB memory limit
-## 2017-08-29 13:02:05 PM CDT Failed
-## 2017-08-29 13:02:06 PM CDT Error on job: Process ended with an error, exiting: 1.
-

If you don’t understand the error message, providing the error -message, job, and run ids to support is the best way to get help!

-
-
-

Programming with civis_ml

-

When programming with civis_ml, errors can be caught -using the base R try or tryCatch. In -civis, we provide functions for getting debugging -information using get_error or just the logs using -fetch_logs.

-
e <- tryCatch({
-  civis_ml(tab, dependent_variable = "upgrade",
-        model_type = "random_fest_classifier",
-        primary_key = "brandable_user_id",
-        excluded_columns = "residential_zip")
-  }, civis_ml_error = function(e) e)
-get_error(e)
-fetch_logs(e)
-

Error handling can be used to implement more robust workflow -programming with civis_ml. In the following function, we -implement retry_model, which retries on e.g. connection -failures but not on a civis_ml_error.

-
retry_model <- function(max_retries = 5) {
-  i <- 1
-  while (i < max_retries) {
-    tryCatch({
-      m <- civis_ml(tab, dependent_variable = "upgrade",
-               model_type = "random_forest_classifier",
-               primary_key = "brandable_user_id",
-               excluded_columns = "residential_zip")
-      return(m)
-    }, civis_ml_error = function(e) stop(e),  # don't retry CivisML errors
-       error = function(e) NULL)              # retry other errors, e.g. connection failures
-    cat("Retry: ", i, fill = TRUE)
-    i <- i + 1
-  }
-  stop("Exceeded maximum retries.")
-}
-

Workflow programming could be further enhanced by printing the logs, -storing the error object, or writing error logs to a file or -database.

-
-
-

Appendix

-
-

Parallelization

-

To fit many models in parallel using parallel, -foreach, or future, check out this -article or the vignette on concurrency at -browseVignettes("civis").

-
-
-

Sample weights

-

Many estimators take a sample_weight argument. This can be specified with the fit_params argument of civis_ml using list(sample_weight = 'survey_weight_column').

-
-
-

Missing data

-

Modeling data must be complete. Any missing values will be imputed -with the mean of non-null values in a column.

-
-
-

CivisML Versions

-

By default, CivisML uses its latest version in production. If you -would like a specific version (e.g., for a production pipeline where -pinning the CivisML version is desirable), both civis_ml -and the civis_ml_* functions have the optional parameter -civisml_version that accepts a string, e.g., -'v2.3' for CivisML v2.3. Please see here -for the list of CivisML versions.

-
-
-

More information

-

Custom estimators can be written in Python and included in CivisML if -they follow the scikit-learn API. For example, the -sparse_logistic, sparse_linear_regressor, and -sparse_ridge_regressor models all use the public Civis -Analytics glmnet -wrapper in Python.

-

Browse the CivisML -documentation for more details!

-
-
- - - - - - - - - - - diff --git a/doc/civis_scripts.R b/doc/civis_scripts.R deleted file mode 100644 index cb2e7cda..00000000 --- a/doc/civis_scripts.R +++ /dev/null @@ -1,199 +0,0 @@ -## ----setup, include = FALSE--------------------------------------------------- -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>", - eval = FALSE -) - -## ----------------------------------------------------------------------------- -# # create a container script with a parameter -# script <- scripts_post_containers( -# required_resources = list(cpu = 1024, memory = 50, diskSpace = 15), -# docker_command = 'cd /package_dir && Rscript inst/run_script.R', -# docker_image_name = 'civisanalytics/datascience-r', -# name = 'SCRIPT NAME', -# params = list( -# list(name = 'NAME_OF_ENV_VAR', -# label = 'Name User Sees', -# type = 'string', -# required = TRUE) -# ) -# ) -# -# # publish the container script as a template -# template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs') -# -# # run a template script, returning file ids of run outputs -# out <- run_template(template$id) -# -# # post a file or JSONValue run output within a script -# write_job_output('filename.csv') -# json_values_post(jsonlite::toJSON(my_list), 'my_list.json') -# -# # get run output file ids of a script -# out <- fetch_output_file_ids(civis_script(id)) -# -# # get csv run outputs of a script -# df <- read_civis(civis_script(id), regex = '.csv', using = read.csv) -# -# # get JSONValue run outputs -# my_list <- read_civis(civis_script(id)) -# - -## ---- eval = FALSE------------------------------------------------------------ -# source <- c(' -# print("Hello World!") -# ') -# job <- scripts_post_r(name = 'Hello!', source = source) - -## ---- eval = FALSE------------------------------------------------------------ -# run <- scripts_post_r_runs(job$id) -# -# # check the status -# scripts_get_r_runs(job$id, run$id) -# -# # automatically poll until the job completes -# await(scripts_get_r_runs, id = job$id, run_id = run$id) - -## ----------------------------------------------------------------------------- -# run_script <- function(source, name = 'Cool') { -# job <- scripts_post_r(name = name, source = source) -# run <- scripts_post_r_runs(job$id) -# await(scripts_get_r_runs, id = job$id, run_id = run$id) -# } - -## ---- eval=FALSE-------------------------------------------------------------- -# source <- c(" -# library(civis) -# data(iris) -# write.csv(iris, 'iris.csv') -# job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID')) -# run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID')) -# file_id <- write_civis_file('iris.csv') -# scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id) -# ") -# run <- run_script(source) - -## ---- eval=FALSE-------------------------------------------------------------- -# source <- c(" -# library(civis) -# data(iris) -# write.csv(iris, 'iris.csv') -# write_job_output('iris.csv') -# ") -# run <- run_script(source) - -## ---- eval=FALSE-------------------------------------------------------------- -# source <- c(" -# library(civis) -# library(jsonlite) -# my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1)) -# json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json') -# ") -# run_farm <- run_script(source) - -## ----------------------------------------------------------------------------- -# out <- scripts_list_r_runs_outputs(run$rId, run$id) -# iris <- read_civis(out$objectId, using = read.csv) - -## ---- eval = 
FALSE------------------------------------------------------------ -# # get csv run outputs -# iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv) -# -# # get JSONValues -# my_farm <- read_civis(civis_script(run_farm$rId)) - -## ---- eval=FALSE-------------------------------------------------------------- -# # Add 'params' and 'arguments' to run_script -# run_script <- function(source, args, name = 'Cool') { -# params <- list( # params is a list of individual parameters -# list( -# name = 'PET_NAME', # name of the environment variable with the user value -# label = 'Pet Name', # name displaayed to the user -# type = 'string', # type -# required = TRUE # required? -# ) -# ) -# job <- scripts_post_r(name = name, -# source = source, -# params = params, -# arguments = args) -# run <- scripts_post_r_runs(job$id) -# await(scripts_get_r_runs, id = job$id, run_id = run$id) -# } -# -# # Access the PET_NAME variable -# source <- c(' -# library(civis) -# pet_name <- Sys.getenv("PET_NAME") -# msg <- paste0("Hello", pet_name, "!") -# print(msg) -# ') -# -# # Let's run it! Here we pass the argument 'Fitzgerald' to the -# # parameter 'PET_NAME' that we created. -# run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald')) -# - -## ---- eval=FALSE-------------------------------------------------------------- -# params <- list( -# list( -# name = 'PET_NAME', -# label = 'Pet Name', -# type = 'string', -# required = TRUE -# ) -# ) -# job <- scripts_post_r(name = 'Pet Greeter', -# source = source, -# params = params) - -## ---- eval=FALSE-------------------------------------------------------------- -# note <- c(" -# # Pet Greeter -# -# Greets your pet, given its name! -# -# For your pet to receive the greeting, it must be a Civis Platform -# user with the ability to read. -# -# Parameters: -# * Pet Name: string, Name of pet. -# -# -# Returns: -# * Nothing -# ") -# template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter') - -## ---- eval=FALSE-------------------------------------------------------------- -# job <- scripts_post_custom(id, arguments = arguments, ...) -# run <- scripts_post_custom_runs(job$id) -# await(scripts_get_custom_runs, id = job$id, run_id = run$id) - -## ---- eval = FALSE------------------------------------------------------------ -# out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES')) - -## ---- eval = FALSE------------------------------------------------------------ -# # We might need to find the project id first -# search_list(type = 'project', 'My project Name') -# out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'), -# target_project_id = project_id) - -## ----eval=FALSE--------------------------------------------------------------- -# templates_patch_scripts(template_id$id, note = new_note) - -## ---- eval = FALSE------------------------------------------------------------ -# source <- c(' -# library(civis) -# pet_name <- Sys.getenv("PET_NAME") -# msg <- paste0("Hello ", pet_name, "! 
Would you care for a sandwich?") -# print(msg) -# ') -# scripts_patch_r(id = job$id, name = 'Pet Greeter', -# source = source, -# params = params) - -## ----------------------------------------------------------------------------- -# templates_patch_scripts(template$id, archived = TRUE) - diff --git a/doc/civis_scripts.Rmd b/doc/civis_scripts.Rmd deleted file mode 100644 index 1f06d154..00000000 --- a/doc/civis_scripts.Rmd +++ /dev/null @@ -1,382 +0,0 @@ ---- -title: "Productionizing with Civis Scripts" -author: "Patrick Miller" -date: "`r Sys.Date()`" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Productionizing with Civis Scripts} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r setup, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>", - eval = FALSE -) -``` - -Civis Scripts are the way to productionize your code with Civis Platform. -You've probably used three of the four types of scripts already in the Civis Platform UI ("Code" --> "Scripts"): -_language_ ([R](https://platform.civisanalytics.com/spa/#/scripts/new?type=r), [Python3](https://platform.civisanalytics.com/spa/#/scripts/new?type=python), [javascript](https://platform.civisanalytics.com/spa/#/scripts/new?type=javascript), and [sql](https://platform.civisanalytics.com/spa/#/scripts/new?type=sql)), [_container_](https://platform.civisanalytics.com/spa/#/scripts/new?type=container), -and [_custom_](https://platform.civisanalytics.com/spa/#/scripts/new?type=custom&fromTemplateId=11219). -If you've run any of these scripts in Civis Platform, you've already started _productionizing_ your -code. Most loosely, productionizing means that your code now runs on a -remote server instead of your local or development machine. - -You probably already know some of the benefits too: - -1. Easily schedule and automate tasks, and include tasks in workflows. -2. Ensure your code doesn't break in the future when dependencies change. -3. Share code with others without them worrying about dependencies or language compatibility. -4. Rapidly deploy fixes and changes. - -This guide will cover how to programmatically do the same tasks using the API -that you are used to doing in GUI. Instead of typing in values for the parameters or clicking to -download outputs, you can do the same thing in your programs. Hooray for automation! - -Specifically, this guide will cover how to programmatically read outputs, -kick off new script runs, and publish your own script templates -to share your code with others. It will make heavy use of API functions -directly, but highlight convenient wrappers for -common tasks where they have been implemented already. - -Ready? Buckle in! - -## Script Concepts and Overview - -A script is a job that executes code in Civis Platform. A script accepts user input through _parameters_, gives values back to the user as _run outputs_, and records any _logs_ along the way. - -A script author can share language and container scripts with others by letting users _clone_ the script. But if an author makes a change to the script such as fixing a bug or adding a feature, users will have to re-clone the script to -get access to those changes. - -A better way to share code with others is with _template_ scripts. A template script is a 'published' language or container script. The script that the template runs is the _backing script_ of the template. 
- -Once a container or language script is published as a template, users can create -their own instances of the template. -These instances are called _custom_ scripts and they inherit all changes made to the template. -This feature makes it easy to share code with others and to rapidly deploy changes and fixes. - -## Quick Start - -```{r} -# create a container script with a parameter -script <- scripts_post_containers( - required_resources = list(cpu = 1024, memory = 50, diskSpace = 15), - docker_command = 'cd /package_dir && Rscript inst/run_script.R', - docker_image_name = 'civisanalytics/datascience-r', - name = 'SCRIPT NAME', - params = list( - list(name = 'NAME_OF_ENV_VAR', - label = 'Name User Sees', - type = 'string', - required = TRUE) - ) -) - -# publish the container script as a template -template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs') - -# run a template script, returning file ids of run outputs -out <- run_template(template$id) - -# post a file or JSONValue run output within a script -write_job_output('filename.csv') -json_values_post(jsonlite::toJSON(my_list), 'my_list.json') - -# get run output file ids of a script -out <- fetch_output_file_ids(civis_script(id)) - -# get csv run outputs of a script -df <- read_civis(civis_script(id), regex = '.csv', using = read.csv) - -# get JSONValue run outputs -my_list <- read_civis(civis_script(id)) - -``` - - - -## Creating and Running Scripts - -Let's make these concepts concrete with an example! We'll use the 'R' language script throughout, -but `container` scripts work exactly the same way. In the second section, -we'll cover `custom` and `template` scripts. - -### An Example Script - -The `post` method creates the job and returns a list of metadata about it, including its type. - -```{r, eval = FALSE} -source <- c(' - print("Hello World!") -') -job <- scripts_post_r(name = 'Hello!', source = source) -``` - -Each script can be uniquely identified by its _job id_. If you have a job id -but don't know what kind of script it is, you can do `jobs_get(id)`. - -Each script type is associated with its own API endpoints. For instance, to post a job of each script type, you need `scripts_post_r`, `scripts_post_containers`, `scripts_post_custom`, or `templates_post_scripts`. - -This job hasn't been run yet. To kick off a run do: - -```{r, eval = FALSE} -run <- scripts_post_r_runs(job$id) - -# check the status -scripts_get_r_runs(job$id, run$id) - -# automatically poll until the job completes -await(scripts_get_r_runs, id = job$id, run_id = run$id) -``` - -Since kicking off a job and polling until it completes is -a really common task for this guide, let's make it a function: - -```{r} -run_script <- function(source, name = 'Cool') { - job <- scripts_post_r(name = name, source = source) - run <- scripts_post_r_runs(job$id) - await(scripts_get_r_runs, id = job$id, run_id = run$id) -} -``` - -### Run Outputs -This script isn't very useful because it doesn't produce any output that we can access. -To add an output to a job, we can use `scripts_post_r_runs_outputs`. The two most -common types of run outputs are `Files` and `JSONValues`. - -#### Files - -We can specify adding a `File` as a run output by uploading the object to S3 -with `write_civis_file` and setting `object_type` in `scripts_post_r_runs_outputs` to `File`. -Notice that the environment variables `CIVIS_JOB_ID` and `CIVIS_RUN_ID` are -automatically inserted into the environment for us to have access to. 
-```{r, eval=FALSE} -source <- c(" - library(civis) - data(iris) - write.csv(iris, 'iris.csv') - job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID')) - run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID')) - file_id <- write_civis_file('iris.csv') - scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id) -") -run <- run_script(source) -``` - -Since this pattern is so common, we replaced it with the function `write_job_output` which -you can use to post a filename as a run output for any script type. - -```{r, eval=FALSE} -source <- c(" - library(civis) - data(iris) - write.csv(iris, 'iris.csv') - write_job_output('iris.csv') -") -run <- run_script(source) -``` - -#### JSONValues - -It is best practice to make run outputs -as portable as possible because the script can be called by any language. -For arbitrary data, JSONValues are often the best choice. -Regardless, it is user friendly to add the file extension to the name of the run output. - -Adding JSONValue run outputs is common enough -for it to be implemented directly as a Civis API endpoint, `json_values_post`: - -```{r, eval=FALSE} -source <- c(" - library(civis) - library(jsonlite) - my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1)) - json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json') -") -run_farm <- run_script(source) -``` - -To retrieve script outputs we can use `scripts_list_r_runs_outputs`: - -```{r} -out <- scripts_list_r_runs_outputs(run$rId, run$id) -iris <- read_civis(out$objectId, using = read.csv) -``` - -Since this pattern is also common, you can simply use `read_civis` directly. -This will work for any script type. Use `regex` and `using` to filter -run outputs by file extension, and provide the appropriate reading function. -JSONValues can be read automatically. - -```{r, eval = FALSE} -# get csv run outputs -iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv) - -# get JSONValues -my_farm <- read_civis(civis_script(run_farm$rId)) -``` - -### Script Parameters - -Scripts are more useful if their behavior can be configured by the user, which can be done with script parameters. -Script _parameters_ are placeholders for input by the user. Specific values of the parameters input by -the user are called _arguments_. Here, we modify `run_script` to automatically add a parameter, -and simultaneously take a value of that parameter provided by the user. In the script itself, -we can access the parameter as an environment variable. - -```{r, eval=FALSE} -# Add 'params' and 'arguments' to run_script -run_script <- function(source, args, name = 'Cool') { - params <- list( # params is a list of individual parameters - list( - name = 'PET_NAME', # name of the environment variable with the user value - label = 'Pet Name', # name displaayed to the user - type = 'string', # type - required = TRUE # required? - ) - ) - job <- scripts_post_r(name = name, - source = source, - params = params, - arguments = args) - run <- scripts_post_r_runs(job$id) - await(scripts_get_r_runs, id = job$id, run_id = run$id) -} - -# Access the PET_NAME variable -source <- c(' - library(civis) - pet_name <- Sys.getenv("PET_NAME") - msg <- paste0("Hello", pet_name, "!") - print(msg) -') - -# Let's run it! Here we pass the argument 'Fitzgerald' to the -# parameter 'PET_NAME' that we created. -run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald')) - -``` - -## Sharing Scripts with Templates - -Now we have a script. 
How can we share it with others so that they can use it? The best -way to share scripts is with `templates`. Let's start by simply posting the script above: - -```{r, eval=FALSE} -params <- list( - list( - name = 'PET_NAME', - label = 'Pet Name', - type = 'string', - required = TRUE - ) -) -job <- scripts_post_r(name = 'Pet Greeter', - source = source, - params = params) -``` - -To make this job a template use `templates_post_scripts`. Adding a notes field (markdown format) -describing what the script does, what the parameters are, and what outputs it posts is -often helpful for users. - -```{r, eval=FALSE} -note <- c(" -# Pet Greeter - -Greets your pet, given its name! - -For your pet to receive the greeting, it must be a Civis Platform -user with the ability to read. - -Parameters: - * Pet Name: string, Name of pet. - - -Returns: - * Nothing -") -template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter') -``` - -### Custom Scripts -`scripts_post_custom` creates an instance of a template that inherits all changes made to the template. -We can now make a simple program to call and run an instance of the template. - -```{r, eval=FALSE} -job <- scripts_post_custom(id, arguments = arguments, ...) -run <- scripts_post_custom_runs(job$id) -await(scripts_get_custom_runs, id = job$id, run_id = run$id) -``` - -Conveniently, `run_template` does exactly this and is already provided in `civis`. -It returns the output file ids of the job for you to use later on in your program. - -```{r, eval = FALSE} -out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES')) -``` - -To stay organized, let's automatically add the script to an existing project: - -```{r, eval = FALSE} -# We might need to find the project id first -search_list(type = 'project', 'My project Name') -out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'), - target_project_id = project_id) -``` - -### Making Changes - -To make changes to the template note or name, use `templates_patch_scripts`. - -```{r,eval=FALSE} -templates_patch_scripts(template_id$id, note = new_note) -``` - -To change the behavior, name, or parameters of the script, -update the backing script using `scripts_patch_r`. - -* Note: -It is _not recommended_ to make breaking changes to the API of a script by -adding a required parameter, changing a parameter default, or removing a run output. This will break workflows -of your users. Instead of making breaking changes, release a new version of the script. - -```{r, eval = FALSE} -source <- c(' - library(civis) - pet_name <- Sys.getenv("PET_NAME") - msg <- paste0("Hello ", pet_name, "! Would you care for a sandwich?") - print(msg) -') -scripts_patch_r(id = job$id, name = 'Pet Greeter', - source = source, - params = params) -``` - -### Discoverability - -To help share your template with others, use this link: -`https://platform.civisanalytics.com/spa/#/scripts/new/{your template id}`. - -This link will automatically direct the user to a new instance of the template. - -It's a good idea to archive unused templates so that it's easy for users to find -the right template quickly. This is important if you automatically deploy your templates. - -Let's clean up our experiment by archiving our Pet Greeter Template: - -```{r} -templates_patch_scripts(template$id, archived = TRUE) -``` - -## Conclusion - -That's it! Now go forth and productionize! 
- - - - diff --git a/doc/civis_scripts.html b/doc/civis_scripts.html deleted file mode 100644 index 5eaf1497..00000000 --- a/doc/civis_scripts.html +++ /dev/null @@ -1,697 +0,0 @@ - - - - - - - - - - - - - - - - -Productionizing with Civis Scripts - - - - - - - - - - - - - - - - - - - - - - - - - - -

Productionizing with Civis Scripts

-

Patrick Miller

-

2023-02-17

- - - -

Civis Scripts are the way to productionize your code with Civis -Platform. You’ve probably used three of the four types of scripts -already in the Civis Platform UI (“Code” –> “Scripts”): -language (R, -Python3, -javascript, -and sql), -container, -and custom. -If you’ve run any of these scripts in Civis Platform, you’ve already -started productionizing your code. Most loosely, -productionizing means that your code now runs on a remote server instead -of your local or development machine.

-

You probably already know some of the benefits too:

-
  1. Easily schedule and automate tasks, and include tasks in workflows.
  2. Ensure your code doesn't break in the future when dependencies change.
  3. Share code with others without them worrying about dependencies or language compatibility.
  4. Rapidly deploy fixes and changes.
-

This guide will cover how to programmatically do the same tasks using the API that you are used to doing in the GUI. Instead of typing in values for the parameters or clicking to download outputs, you can do the same thing in your programs. Hooray for automation!

-

Specifically, this guide will cover how to programmatically read -outputs, kick off new script runs, and publish your own script templates -to share your code with others. It will make heavy use of API functions -directly, but highlight convenient wrappers for common tasks where they -have been implemented already.

-

Ready? Buckle in!

-
-

Script Concepts and Overview

-

A script is a job that executes code in Civis Platform. A script -accepts user input through parameters, gives values back to the -user as run outputs, and records any logs along the -way.

-

A script author can share language and container scripts with others -by letting users clone the script. But if an author makes a -change to the script such as fixing a bug or adding a feature, users -will have to re-clone the script to get access to those changes.

-

A better way to share code with others is with template -scripts. A template script is a ‘published’ language or container -script. The script that the template runs is the backing script -of the template.

-

Once a container or language script is published as a template, users -can create their own instances of the template. These instances are -called custom scripts and they inherit all changes made to the -template. This feature makes it easy to share code with others and to -rapidly deploy changes and fixes.

-
-
-

Quick Start

-
# create a container script with a parameter
-script <- scripts_post_containers(
-  required_resources = list(cpu = 1024, memory = 50, diskSpace = 15),
-  docker_command = 'cd /package_dir && Rscript inst/run_script.R',
-  docker_image_name = 'civisanalytics/datascience-r',
-  name = 'SCRIPT NAME',
-  params = list(
-    list(name = 'NAME_OF_ENV_VAR',
-         label = 'Name User Sees', 
-         type = 'string',
-         required = TRUE)
-  )
-)
-
-# publish the container script as a template 
-template <- templates_post_scripts(script$id, name = 'TEMPLATE NAME', note = 'Markdown Docs')
-
-# run a template script, returning file ids of run outputs
-out <- run_template(template$id)
-
-# post a file or JSONValue run output within a script
-write_job_output('filename.csv')
-json_values_post(jsonlite::toJSON(my_list), 'my_list.json')
-
-# get run output file ids of a script
-out <- fetch_output_file_ids(civis_script(id))
-
-# get csv run outputs of a script
-df <- read_civis(civis_script(id), regex = '.csv', using = read.csv)
-
-# get JSONValue run outputs
-my_list <- read_civis(civis_script(id))
-
-
-

Creating and Running Scripts

-

Let’s make these concepts concrete with an example! We’ll use the ‘R’ -language script throughout, but container scripts work -exactly the same way. In the second section, we’ll cover -custom and template scripts.

-
-

An Example Script

-

The post method creates the job and returns a list of -metadata about it, including its type.

-
source <- c('
- print("Hello World!")
-')
-job <- scripts_post_r(name = 'Hello!', source = source)
-

Each script can be uniquely identified by its job id. If you -have a job id but don’t know what kind of script it is, you can do -jobs_get(id).

-

Each script type is associated with its own API endpoints. For -instance, to post a job of each script type, you need -scripts_post_r, scripts_post_containers, -scripts_post_custom, or -templates_post_scripts.

-

This job hasn’t been run yet. To kick off a run do:

-
run <- scripts_post_r_runs(job$id)
-
-# check the status
-scripts_get_r_runs(job$id, run$id)
-
-# automatically poll until the job completes
-await(scripts_get_r_runs, id = job$id, run_id = run$id)
-

Since kicking off a job and polling until it completes is a really -common task for this guide, let’s make it a function:

-
run_script <- function(source, name = 'Cool') {
-  job <- scripts_post_r(name = name, source = source)
-  run <- scripts_post_r_runs(job$id)
-  await(scripts_get_r_runs, id = job$id, run_id = run$id)
-}
-
-
-

Run Outputs

-

This script isn’t very useful because it doesn’t produce any output -that we can access. To add an output to a job, we can use -scripts_post_r_runs_outputs. The two most common types of -run outputs are Files and JSONValues.

-
-

Files

-

We can specify adding a File as a run output by uploading the object to S3 with write_civis_file and setting object_type in scripts_post_r_runs_outputs to File. Notice that the environment variables CIVIS_JOB_ID and CIVIS_RUN_ID are automatically set in the environment of the running script, so we can access them.

-
source <- c("
- library(civis)
- data(iris)
- write.csv(iris, 'iris.csv')
- job_id <- as.numeric(Sys.getenv('CIVIS_JOB_ID'))
- run_id <- as.numeric(Sys.getenv('CIVIS_RUN_ID'))
- file_id <- write_civis_file('iris.csv')
- scripts_post_r_runs_outputs(job_id, run_id, object_type = 'File', object_id = file_id)
-")
-run <- run_script(source)
-

Since this pattern is so common, we wrapped it in the function write_job_output, which you can use to post a filename as a run output for any script type.

-
source <- c("
- library(civis)
- data(iris)
- write.csv(iris, 'iris.csv')
- write_job_output('iris.csv')
-")
-run <- run_script(source)
-
-
-

JSONValues

-

It is best practice to make run outputs as portable as possible because the script can be called from any language. For arbitrary data, JSONValues are often the best choice. Whatever the format, it is user friendly to include the file extension in the name of the run output.

-

Adding JSONValue run outputs is common enough for it to be -implemented directly as a Civis API endpoint, -json_values_post:

-
source <- c("
- library(civis)
- library(jsonlite)
- my_farm <- list(cows = 1, ducks = list(mallard = 2, goldeneye = 1))
- json_values_post(jsonlite::toJSON(my_farm), name = 'my_farm.json')
-")
-run_farm <- run_script(source)
-

To retrieve script outputs we can use -scripts_list_r_runs_outputs:

-
out <- scripts_list_r_runs_outputs(run$rId, run$id)
-iris <- read_civis(out$objectId, using = read.csv)
-

Since this pattern is also common, you can simply use -read_civis directly. This will work for any script type. -Use regex and using to filter run outputs by -file extension, and provide the appropriate reading function. JSONValues -can be read automatically.

-
# get csv run outputs
-iris <- read_civis(civis_script(run$rId), regex = '.csv', using = read.csv)
-
-# get JSONValues
-my_farm <- read_civis(civis_script(run_farm$rId))
-
-
-
-

Script Parameters

-

Scripts are more useful if their behavior can be configured by the -user, which can be done with script parameters. Script -parameters are placeholders for input by the user. Specific -values of the parameters input by the user are called -arguments. Here, we modify run_script to -automatically add a parameter, and simultaneously take a value of that -parameter provided by the user. In the script itself, we can access the -parameter as an environment variable.

-
# Add 'params' and 'arguments' to run_script
-run_script <- function(source, args, name = 'Cool') {
-  params <- list(          # params is a list of individual parameters
-    list(
-      name = 'PET_NAME',   # name of the environment variable with the user value
-      label = 'Pet Name',  # name displayed to the user
-      type = 'string',     # type 
-      required = TRUE      # required?
-    )
-  )
-  job <- scripts_post_r(name = name, 
-                        source = source, 
-                        params = params, 
-                        arguments = args)
-  run <- scripts_post_r_runs(job$id)
-  await(scripts_get_r_runs, id = job$id, run_id = run$id)
-}
-
-# Access the PET_NAME variable
-source <- c('
-  library(civis)
-  pet_name <- Sys.getenv("PET_NAME")
-  msg <- paste0("Hello", pet_name, "!")
-  print(msg)
-')
-
-# Let's run it! Here we pass the argument 'Fitzgerald' to the 
-# parameter 'PET_NAME' that we created.
-run_script(source, name = 'Pet Greeting', args = list(PET_NAME = 'Fitzgerald'))
-
-
-
-

Sharing Scripts with Templates

-

Now we have a script. How can we share it with others so that they -can use it? The best way to share scripts is with -templates. Let’s start by simply posting the script -above:

-
params <- list(          
-  list(
-    name = 'PET_NAME',   
-    label = 'Pet Name',  
-    type = 'string',     
-    required = TRUE      
-  )
-)
-job <- scripts_post_r(name = 'Pet Greeter', 
-                      source = source, 
-                      params = params)
-

To make this job a template use templates_post_scripts. -Adding a notes field (markdown format) describing what the script does, -what the parameters are, and what outputs it posts is often helpful for -users.

-
note <- c("
-# Pet Greeter
-
-Greets your pet, given its name! 
- 
-For your pet to receive the greeting, it must be a Civis Platform
-user with the ability to read.
- 
-Parameters:
-  * Pet Name: string, Name of pet.
-
-  
-Returns:
-  * Nothing
-")
-template <- templates_post_scripts(script_id = job$id, note = note, name = 'Pet Greeter')
-
-

Custom Scripts

-

scripts_post_custom creates an instance of a template -that inherits all changes made to the template. We can now make a simple -program to call and run an instance of the template.

-
job <- scripts_post_custom(id, arguments = arguments, ...)
-run <- scripts_post_custom_runs(job$id)
-await(scripts_get_custom_runs, id = job$id, run_id = run$id)
-

Conveniently, run_template does exactly this and is -already provided in civis. It returns the output file ids -of the job for you to use later on in your program.

-
out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'))
-

To stay organized, let’s automatically add the script to an existing -project:

-
# We might need to find the project id first
-search_list(type = 'project', 'My project Name')
-out <- run_template(template$id, arguments = list(PET_NAME = 'CHARLES'),
-                    target_project_id = project_id)
-
-
-

Making Changes

-

To make changes to the template note or name, use -templates_patch_scripts.

-
templates_patch_scripts(template_id$id, note = new_note)
-

To change the behavior, name, or parameters of the script, update the -backing script using scripts_patch_r.

-
  • Note: It is not recommended to make breaking changes to the API of a script by adding a required parameter, changing a parameter default, or removing a run output. This will break workflows of your users. Instead of making breaking changes, release a new version of the script.
-
source <- c('
-  library(civis)
-  pet_name <- Sys.getenv("PET_NAME")
-  msg <- paste0("Hello ", pet_name, "! Would you care for a sandwich?")
-  print(msg)
-')
-scripts_patch_r(id = job$id, name = 'Pet Greeter',
-                source = source,
-                params = params)
-
-
-

Discoverability

-

To help share your template with others, use this link: -https://platform.civisanalytics.com/spa/#/scripts/new/{your template id}.

-

This link will automatically direct the user to a new instance of the -template.

-

It’s a good idea to archive unused templates so that it’s easy for -users to find the right template quickly. This is important if you -automatically deploy your templates.

-

Let’s clean up our experiment by archiving our Pet Greeter -Template:

-
templates_patch_scripts(template$id, archived = TRUE)
-
-
-
-

Conclusion

-

That’s it! Now go forth and productionize!

-
- - - - - - - - - - - diff --git a/doc/concurrency.R b/doc/concurrency.R deleted file mode 100644 index 5752cd0c..00000000 --- a/doc/concurrency.R +++ /dev/null @@ -1,86 +0,0 @@ -## ---- eval=FALSE-------------------------------------------------------------- -# nap <- function(seconds) { -# Sys.sleep(seconds) -# } -# -# start <- Sys.time() -# nap(1) -# nap(2) -# nap(3) -# end <- Sys.time() -# print(end - start) - -## ---- eval=FALSE-------------------------------------------------------------- -# library(future) -# library(civis) -# -# # Define a concurrent backend with enough processes so each function -# # we want to run concurrently has its own process. Here we'll need at least 2. -# plan("multiprocess", workers=10) -# -# # Load data -# data(iris) -# data(airquality) -# airquality <- airquality[!is.na(airquality$Ozone),] # remove missing in dv -# -# # Create a future for each model, using the special %<-% assignment operator. -# # These futures are created immediately, kicking off the models. -# air_model %<-% civis_ml(airquality, "Ozone", "gradient_boosting_regressor") -# iris_model %<-% civis_ml(iris, "Species", "sparse_logistic") -# -# # At this point, `air_model` has not finished training yet. That's okay, -# # the program will just wait until `air_model` is done before printing it. -# print("airquality R^2:") -# print(air_model$metrics$metrics$r_squared) -# print("iris ROC:") -# print(iris_model$metrics$metrics$roc_auc) - -## ---- eval=FALSE-------------------------------------------------------------- -# library(parallel) -# library(doParallel) -# library(foreach) -# library(civis) -# -# # Register a local cluster with enough processes so each function -# # we want to run concurrently has its own process. Here we'll -# # need at least 3, with 1 for each model_type in model_types. -# cluster <- makeCluster(10) -# registerDoParallel(cluster) -# -# # Model types to build -# model_types <- c("sparse_logistic", -# "gradient_boosting_classifier", -# "random_forest_classifier") -# -# # Load data -# data(iris) -# -# # Listen for multiple models to complete concurrently -# model_results <- foreach(model_type=iter(model_types), .packages='civis') %dopar% { -# civis_ml(iris, "Species", model_type) -# } -# stopCluster(cluster) -# print("ROC Results") -# lapply(model_results, function(result) result$metrics$metrics$roc_auc) - -## ---- eval=FALSE-------------------------------------------------------------- -# library(civis) -# library(parallel) -# -# # Model types to build -# model_types <- c("sparse_logistic", -# "gradient_boosting_classifier", -# "random_forest_classifier") -# -# # Load data -# data(iris) -# -# # Loop over all models in parallel with a max of 10 processes -# model_results <- mclapply(model_types, function(model_type) { -# civis_ml(iris, "Species", model_type) -# }, mc.cores=10) -# -# # Wait for all models simultaneously -# print("ROC Results") -# lapply(model_results, function(result) result$metrics$metrics$roc_auc) - diff --git a/doc/concurrency.Rmd b/doc/concurrency.Rmd deleted file mode 100644 index fbfbb152..00000000 --- a/doc/concurrency.Rmd +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "Making Simultaneous Calls to Platform" -date: "2017-08-14" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Asychronous Programming} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -## Concurrency in the Civis R Client - -Just like most functions in R, all functions in `civis` block. 
This means -that each function in a program must complete before the next function runs. -For instance, - -```{r, eval=FALSE} -nap <- function(seconds) { - Sys.sleep(seconds) -} - -start <- Sys.time() -nap(1) -nap(2) -nap(3) -end <- Sys.time() -print(end - start) -``` - -This program takes 6 seconds to complete, since it takes 1 second for the -first `nap`, 2 for the second and 3 for the last. This program is easy to -reason about because each function is sequentially executed. Usually, that -is how we want our programs to run. - -There are some exceptions to this rule. Sequentially executing each function -might be inconvenient if each `nap` took 30 minutes instead of a few seconds. -In that case, we might like our program to perform all 3 naps simultaneously. -In the above example, running all 3 naps simultaneously would take 3 seconds -(the length of the longest nap) rather than 6 seconds. - -As all function calls in `civis` block, `civis` relies on the mature -R ecosystem for parallel programming to enable multiple simultaneous -tasks. The three packages we introduce are `future`, `foreach`, and -`parallel` (included in base R). For all packages, simultaneous tasks are -enabled by starting each task in a separate R process. Examples for building -several models in parallel with different libraries are included below. The -libraries have strengths and weaknesses and choosing which library to use is -often a matter of preference. - -It is important to note that when calling `civis` functions, the computation -required to complete the task takes place in Platform. For instance, during -a call to `civis_ml`, Platform builds the model while your laptop waits -for the task to complete. This means that you don't have to worry about -running out of memory or cpu cores on your laptop when training dozens of -models, or when scoring a model on a very large population. The task being -parallelized in the code below is simply the task of waiting for Platform to -send results back to your laptop. - - -## Building Many Models with `future` - -```{r, eval=FALSE} -library(future) -library(civis) - -# Define a concurrent backend with enough processes so each function -# we want to run concurrently has its own process. Here we'll need at least 2. -plan("multiprocess", workers=10) - -# Load data -data(iris) -data(airquality) -airquality <- airquality[!is.na(airquality$Ozone),] # remove missing in dv - -# Create a future for each model, using the special %<-% assignment operator. -# These futures are created immediately, kicking off the models. -air_model %<-% civis_ml(airquality, "Ozone", "gradient_boosting_regressor") -iris_model %<-% civis_ml(iris, "Species", "sparse_logistic") - -# At this point, `air_model` has not finished training yet. That's okay, -# the program will just wait until `air_model` is done before printing it. -print("airquality R^2:") -print(air_model$metrics$metrics$r_squared) -print("iris ROC:") -print(iris_model$metrics$metrics$roc_auc) -``` - - -## Building Many Models with `foreach` - -```{r, eval=FALSE} -library(parallel) -library(doParallel) -library(foreach) -library(civis) - -# Register a local cluster with enough processes so each function -# we want to run concurrently has its own process. Here we'll -# need at least 3, with 1 for each model_type in model_types. 
-cluster <- makeCluster(10) -registerDoParallel(cluster) - -# Model types to build -model_types <- c("sparse_logistic", - "gradient_boosting_classifier", - "random_forest_classifier") - -# Load data -data(iris) - -# Listen for multiple models to complete concurrently -model_results <- foreach(model_type=iter(model_types), .packages='civis') %dopar% { - civis_ml(iris, "Species", model_type) -} -stopCluster(cluster) -print("ROC Results") -lapply(model_results, function(result) result$metrics$metrics$roc_auc) -``` - -## Building Many Models with `mcparallel` - -Note: `mcparallel` relies on forking and thus is not available on Windows. - -```{r, eval=FALSE} -library(civis) -library(parallel) - -# Model types to build -model_types <- c("sparse_logistic", - "gradient_boosting_classifier", - "random_forest_classifier") - -# Load data -data(iris) - -# Loop over all models in parallel with a max of 10 processes -model_results <- mclapply(model_types, function(model_type) { - civis_ml(iris, "Species", model_type) -}, mc.cores=10) - -# Wait for all models simultaneously -print("ROC Results") -lapply(model_results, function(result) result$metrics$metrics$roc_auc) -``` - -## Operating System / Environment Specific Errors - -Differences in operating systems and R environments may cause errors for -some users of the parallel libraries listed above. In particular, -`mclapply` does not work on Windows and may not work in RStudio on -certain operating systems. `future` may require `plan(multisession)` on -certain operating systems. If you encounter an error parallelizing -functions in `civis`, we recommend first trying more than one method -listed above. While we will address errors specific to `civis` with -regards to parallel code, the technicalities of parallel libraries in -R across operating systems and environments prevent us from providing -more general support for issues regarding parallelized code in R. diff --git a/doc/concurrency.html b/doc/concurrency.html deleted file mode 100644 index d1c192d8..00000000 --- a/doc/concurrency.html +++ /dev/null @@ -1,499 +0,0 @@ - - - - - - - - - - - - - - - -Making Simultaneous Calls to Platform - - - - - - - - - - - - - - - - - - - - - - - - - - -

diff --git a/doc/concurrency.html b/doc/concurrency.html
deleted file mode 100644
index d1c192d8..00000000
--- a/doc/concurrency.html
+++ /dev/null
@@ -1,499 +0,0 @@
- - - - - - - - - - - diff --git a/doc/data_import_and_export.R b/doc/data_import_and_export.R deleted file mode 100644 index 62029d9d..00000000 --- a/doc/data_import_and_export.R +++ /dev/null @@ -1,81 +0,0 @@ -## ----eval=FALSE--------------------------------------------------------------- -# df <- read_civis("schema.tablename", database = "my-database") - -## ----eval=FALSE--------------------------------------------------------------- -# options(civis.default_db = "my-database") -# df <- read_civis("schema.tablename") - -## ----eval=FALSE--------------------------------------------------------------- -# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" -# df <- read_civis(sql(query)) - -## ---- eval=FALSE-------------------------------------------------------------- -# data(iris) -# id <- write_civis_file(iris) -# df <- read_civis(id) - -## ----eval=FALSE--------------------------------------------------------------- -# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" -# df <- read_civis(sql(query), colClasses = "character") -# df2 <- read_civis(sql(query), as.is = TRUE) - -## ----eval=FALSE--------------------------------------------------------------- -# options(civis.default_db = "my_database") -# df <- data.frame(x = rnorm(100), y = rnorm(100), z = rnorm(100)) -# write_civis(df, tablename = "schema.tablename", -# distkey = "id", sortkey1 = "date", sortkey2 = "type") - -## ----eval=FALSE--------------------------------------------------------------- -# write_civis(df, tablename = "schema.tablename", if_exists = "append") -# write_civis(df, tablename = "schema.tablename", if_exists = "truncate") - -## ---- eval=FALSE-------------------------------------------------------------- -# write_civis("~/path/to/my_data.csv", tablename="schema.tablename") - -## ---- eval = FALSE------------------------------------------------------------ -# # Upload a data frame -# data(iris) -# id <- write_civis_file(iris) -# iris2 <- read_civis(id) -# -# # Upload an arbitrary R object -# farm <- list(chickens = 1, ducks = 4, pigs = 2, cows = 1) -# id <- write_civis_file(farm) -# farm2 <- read_civis(id, using = readRDS) -# - -## ---- eval = FALSE------------------------------------------------------------ -# id <- write_civis_file("path/to/my_data.json") -# read_civis(id, using = jsonlite::fromJSON) - -## ----eval=FALSE--------------------------------------------------------------- -# query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" -# download_civis(sql(query), file = "path/to/my_file.csv") -# download_civis("schema.tablename", file = "path/to/my_file.csv") -# -# id <- write_civis_file(iris) -# download_civis(id, file = "path/to/my_iris.rds") - -## ----eval=FALSE--------------------------------------------------------------- -# q_res <- query_civis("GRANT ALL ON schema.my_table TO GROUP admin") - -## ---- eval = FALSE------------------------------------------------------------ -# id <- q_res$id -# query_civis(id) - -## ---- eval=FALSE-------------------------------------------------------------- -# Error in api_key() : -# The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') - -## ---- eval=FALSE-------------------------------------------------------------- -# read_civis(sql("SELECT * FROM schema.tablename WHERE 1 = 0")) -# Error in download_script_results(run$script_id, run$run_id) : -# Query produced no output. 
- -## ---- eval=FALSE-------------------------------------------------------------- -# Error in get_db(database) : -# Argument database is NULL and options("civis.default_db") not set. Set this option using options(civis.default_db = "my_database") - -## ---- eval=FALSE-------------------------------------------------------------- -# sapply(databases_list(), function(x) x$name) - diff --git a/doc/data_import_and_export.Rmd b/doc/data_import_and_export.Rmd deleted file mode 100644 index 5d364e76..00000000 --- a/doc/data_import_and_export.Rmd +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: "Data Import and Export" -author: "Patrick Miller, Keith Ingersoll" -date: "2017-08-14" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Civis IO} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -## Passing Data Back and Forth - -Often the simplest, but most useful operation when working with the -Platform is to move data in and out. From the perspective of the R -client, we call moving data from the Platform to the local -machine *reading*. Likewise, moving data from the local -machine to the Platform is called *writing*. - -The `civis` client handles data imports and exports in two basic ways: - -1. Moving data directly between the R workspace and the Platform (the most common use case). -2. Moving data between the Platform and local csv files (this is useful for large data that doesn't fit into memory). - -Data can be stored on Platform in two places: - -1. Amazon Redshift, a SQL database. -2. Amazon S3, also referred to as the 'files' endpoint. - -Tables in Redshift are accessed and modified using SQL queries. Tables in Redshift -can be easily shared and used in multiple workflows by multiple people. -However, importing and exporting even small files on Redshift can be slow. - -R objects and arbitrary files can be stored on Amazon S3, and are accessed -using a numeric file id. Data frames are uploaded as CSVs for portability, -and arbitrary R objects are serialized using `saveRDS` for speed and efficiency. - -## Reading Data Into R From Platform - -The main workhorse for getting data from Platform is `read_civis`. -This function is designed to work similarly to the built in function -`read.csv`, returning a dataframe from a table in Platform. For more flexibility, -`read_civis` can download files from Redshift using an SQL query, or download a -file from S3 ('the files endpoint') using a file id. - -To read from a table in Platform, simply provide the name of the schema, -table within the schema, and the database: - -```{r eval=FALSE} -df <- read_civis("schema.tablename", database = "my-database") -``` - -For convenience, a default database can be set in the package options, and not specified -in further calls to any IO function. If there is only one database available, this -database will automatically be used as the default. -In the examples that follow, we assume that a default database has been set. - -```{r eval=FALSE} -options(civis.default_db = "my-database") -df <- read_civis("schema.tablename") -``` - -`read_civis` accepts SQL queries when more flexibility is needed. This is accomplished -by wrapping `sql(...)` around a string containing the query. With `read_civis`, -queries are always read only, and always return a `data.frame`. 
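Because the query is an ordinary character string, it can also be assembled from
R values before being wrapped in `sql()`. A small sketch (the table, column, and
cutoff below are placeholders):

```{r, eval=FALSE}
cutoff <- 23
query <- sprintf("SELECT * FROM schema.tablename WHERE var1 < %s", cutoff)
df <- read_civis(sql(query))
```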
-
-```{r eval=FALSE}
-query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23"
-df <- read_civis(sql(query))
-```
-
-Finally, `read_civis` accepts a file id as the first argument to read in files
-from S3 as data frames. IDs are obtained from `write_civis_file`.
-
-```{r, eval=FALSE}
-data(iris)
-id <- write_civis_file(iris)
-df <- read_civis(id)
-```
-
-For maximum flexibility, `read_civis` accepts parameters from `read.csv`, which
-can be used to define data types when the defaults are not appropriate:
-for instance, when numbers should be read in as characters, or when strings
-shouldn't be read in as factors.
-
-```{r eval=FALSE}
-query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23"
-df <- read_civis(sql(query), colClasses = "character")
-df2 <- read_civis(sql(query), as.is = TRUE)
-```
-
-
-## Uploading Data to a Database
-
-The complement to reading data into the R workspace is writing data
-to the Platform. The function `write_civis` uploads data frames or csv files to
-an Amazon Redshift database. The function `write_civis_file` uploads R objects and
-arbitrary files to Amazon S3 (the files endpoint).
-
-When creating a new table, `write_civis` relies on Platform to determine
-data types. Distkeys and sortkeys can optionally be set to improve query performance.
-Again, we set a default database in these examples for convenience.
-
-```{r eval=FALSE}
-options(civis.default_db = "my_database")
-df <- data.frame(x = rnorm(100), y = rnorm(100), z = rnorm(100))
-write_civis(df, tablename = "schema.tablename",
-            distkey = "id", sortkey1 = "date", sortkey2 = "type")
-```
-
-By default, `write_civis` will fail if the table passed in `tablename`
-already exists. Optionally, `write_civis` can append to an existing
-table. It may also delete all rows and then append (truncate). If
-specific datatypes are required, a table may first be created with a
-SQL `CREATE TABLE` command and then data can be inserted with
-`write_civis`.
-
-```{r eval=FALSE}
-write_civis(df, tablename = "schema.tablename", if_exists = "append")
-write_civis(df, tablename = "schema.tablename", if_exists = "truncate")
-```
-
-If a csv file is saved to disk but not loaded in the R workspace,
-`write_civis` will upload the csv to Platform without first loading
-the csv into RAM. This can save time when a file is large.
-Uploading a csv directly to Platform is done by simply passing the file name
-and path to `write_civis` as the first argument:
-
-```{r, eval=FALSE}
-write_civis("~/path/to/my_data.csv", tablename="schema.tablename")
-```
-
-## Uploading Data to S3
-
-Finally, `write_civis_file` uploads data frames, R objects and files to Amazon S3, which is also
-referred to as the 'files endpoint.' Data frames are uploaded as CSVs.
-R objects saved to the files endpoint are serialized using `saveRDS`.
-
-Data frames and R objects can be loaded back into memory by passing the
-file id to `read_civis`, along with an appropriate `using` argument.
-
-```{r, eval = FALSE}
-# Upload a data frame
-data(iris)
-id <- write_civis_file(iris)
-iris2 <- read_civis(id)
-
-# Upload an arbitrary R object
-farm <- list(chickens = 1, ducks = 4, pigs = 2, cows = 1)
-id <- write_civis_file(farm)
-farm2 <- read_civis(id, using = readRDS)
-
-```
-
-When passed a file name and path, `write_civis_file` will upload the file to S3 as-is.
-To read the file back into memory, an appropriate function to convert the
-file to a data frame must be provided to the `using` argument of `read_civis`.
-For example, a JSON file can be read back into R using `jsonlite::fromJSON`. - -```{r, eval = FALSE} -id <- write_civis_file("path/to/my_data.json") -read_civis(id, using = jsonlite::fromJSON) -``` - - -## Downloading Large Data Sets from Platform. - -Occasionally, a table may be too large to store in memory. `download_civis` -can be used in place of `read_civis` to download data straight to disk from Platform. - -Like `read_civis`, `download_civis` can download files from Amazon Redshift by passing -`schema.tablename`, or `sql(...)` as the first argument. Files can be downloaded from -Amazon S3 by passing the file id to `download_civis`. - -```{r eval=FALSE} -query <- "SELECT * FROM table JOIN other_table USING id WHERE var1 < 23" -download_civis(sql(query), file = "path/to/my_file.csv") -download_civis("schema.tablename", file = "path/to/my_file.csv") - -id <- write_civis_file(iris) -download_civis(id, file = "path/to/my_iris.rds") -``` - - -## Running Queries on Platform - -Arbitrary queries can be run on Redshift using `query_civis`, which returns the meta-data -of the query. - -```{r eval=FALSE} -q_res <- query_civis("GRANT ALL ON schema.my_table TO GROUP admin") -``` - -Existing queries can be re-run by passing the query id to `query_civis`: - -```{r, eval = FALSE} -id <- q_res$id -query_civis(id) -``` - -## Common Errors - -#### Civis API key not properly set or has expired. -Often an improper API key will return an error like below: -```{r, eval=FALSE} - Error in api_key() : - The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') -``` - -However, there may be cases where the errors are less straightforward. It -is a good idea to test that API credentials are properly set with a simple -call such as `civis::users_list_me()`. See the README to set -up API keys correctly. - -#### Query does not return any results. -This may happen if a table is empty -or when no rows match a `WHERE` statement. To fix, double check that -the query is correct or the table is not empty. - -```{r, eval=FALSE} -read_civis(sql("SELECT * FROM schema.tablename WHERE 1 = 0")) -Error in download_script_results(run$script_id, run$run_id) : - Query produced no output. -``` - -#### Database not set correctly. -For both `read_civis` and `write_civis`, the database must be set to the -correct, case sensitive name (not hostname) of -the database. - -```{r, eval=FALSE} - Error in get_db(database) : - Argument database is NULL and options("civis.default_db") not set. Set this option using options(civis.default_db = "my_database") -``` - -To see a complete list of database names, run: - -```{r, eval=FALSE} -sapply(databases_list(), function(x) x$name) -``` diff --git a/doc/data_import_and_export.html b/doc/data_import_and_export.html deleted file mode 100644 index fdc2c624..00000000 --- a/doc/data_import_and_export.html +++ /dev/null @@ -1,551 +0,0 @@ - - - - - - - - - - - - - - - - -Data Import and Export - - - - - - - - - - - - - - - - - - - - - - - - - - -

- - - - - - - - - - - diff --git a/doc/quick_start.R b/doc/quick_start.R deleted file mode 100644 index 0a3ae8bc..00000000 --- a/doc/quick_start.R +++ /dev/null @@ -1,57 +0,0 @@ -## ---- eval=FALSE-------------------------------------------------------------- -# name <- civis::users_list_me()$name -# paste(name, "is really awesome!") - -## ---- eval=FALSE-------------------------------------------------------------- -# library(civis) -# -# # First we'll load a dataframe of the famous iris dataset -# data(iris) -# -# # We'll set a default database and define the table where want to -# # store our data -# options(civis.default_db = "my_database") -# iris_tablename <- "my_schema.my_table" -# -# # Next we'll push the data to the database table -# write_civis(iris, iris_tablename) -# -# # Great, now let's read it back -# df <- read_civis(iris_tablename) -# -# # Hmmm, I'm more partial to setosa myself. Let's write a custom sql query. -# # We'll need to wrap our query string in `sql` to let read_civis know we -# # are passing in a sql command rather than a tablename. -# query_str <- paste("SELECT * FROM", iris_tablename, "WHERE Species = 'setosa'") -# iris_setosa <- read_civis(sql(query_str)) -# -# # Now let's store this data along with a note as a serialized R object -# # on a remote file system. We could store any object remotely this way. -# data <- list(data = iris_setosa, special_note = "The best iris species") -# file_id <- write_civis_file(data) -# -# # Finally, let's read back our data from the remote file system. -# data2 <- read_civis(file_id) -# data2[["special_note"]] -# -# ## [1] "The best iris species" - -## ---- eval=FALSE-------------------------------------------------------------- -# library(civis) -# -# # It really is a great dataset -# data(iris) -# -# # Gradient boosting or random forest, who will win? -# gb_model <- civis_ml_gradient_boosting_classifier(iris, "Species") -# rf_model <- civis_ml_random_forest_classifier(iris, "Species") -# macroavgs <- list(gb_model = gb_model$metrics$metrics$roc_auc_macroavg, -# rf_model = rf_model$metrics$metrics$roc_auc_macroavg) -# macroavgs -# -# ## $gb_model -# ## [1] 0.9945333 -# ## -# ## $rf_model -# ## [1] 0.9954667 - diff --git a/doc/quick_start.Rmd b/doc/quick_start.Rmd deleted file mode 100644 index 00ec7950..00000000 --- a/doc/quick_start.Rmd +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: "Getting Started" -date: "2017-08-21" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Getting Started} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -## A (Very) Quick Start - -To begin, make sure `civis` is installed and your -[API key is in your R environment](https://civisanalytics.github.io/civis-r/#installation). -You can quickly test that `civis` is working by invoking - -```{r, eval=FALSE} -name <- civis::users_list_me()$name -paste(name, "is really awesome!") -``` - -If `civis` is working, you'll see a friendly message. Otherwise, you might see an -error like this when `civis` wasn't installed properly: - -``` -Error in loadNamespace(name) : there is no package called 'civis' -``` - -or like this if you haven't set your API key correctly: - -``` -Error in api_key() : The environmental variable CIVIS_API_KEY is not set. Add this to your .Renviron or call Sys.setenv(CIVIS_API_KEY = '') -``` - -With `civis`, moving data to and from the cloud takes only a few lines of code. -Your data can be stored as rows in a table, CSVs on remote file system or even -as serialized R objects like nested lists. 
For example, - -```{r, eval=FALSE} -library(civis) - -# First we'll load a dataframe of the famous iris dataset -data(iris) - -# We'll set a default database and define the table where want to -# store our data -options(civis.default_db = "my_database") -iris_tablename <- "my_schema.my_table" - -# Next we'll push the data to the database table -write_civis(iris, iris_tablename) - -# Great, now let's read it back -df <- read_civis(iris_tablename) - -# Hmmm, I'm more partial to setosa myself. Let's write a custom sql query. -# We'll need to wrap our query string in `sql` to let read_civis know we -# are passing in a sql command rather than a tablename. -query_str <- paste("SELECT * FROM", iris_tablename, "WHERE Species = 'setosa'") -iris_setosa <- read_civis(sql(query_str)) - -# Now let's store this data along with a note as a serialized R object -# on a remote file system. We could store any object remotely this way. -data <- list(data = iris_setosa, special_note = "The best iris species") -file_id <- write_civis_file(data) - -# Finally, let's read back our data from the remote file system. -data2 <- read_civis(file_id) -data2[["special_note"]] - -## [1] "The best iris species" -``` - -`civis` also includes functionality for working with CivisML, Civis' machine -learning ecosystem. With the combined power of CivisML and `civis`, you can build -models in the cloud where the models can use as much memory as they need and -there’s no chance of your laptop crashing. - -```{r, eval=FALSE} -library(civis) - -# It really is a great dataset -data(iris) - -# Gradient boosting or random forest, who will win? -gb_model <- civis_ml_gradient_boosting_classifier(iris, "Species") -rf_model <- civis_ml_random_forest_classifier(iris, "Species") -macroavgs <- list(gb_model = gb_model$metrics$metrics$roc_auc_macroavg, - rf_model = rf_model$metrics$metrics$roc_auc_macroavg) -macroavgs - -## $gb_model -## [1] 0.9945333 -## -## $rf_model -## [1] 0.9954667 -``` - -For a comprehensive list of functions in `civis`, see -[Reference](https://civisanalytics.github.io/civis-r/reference/index.html) in the [full -documentation](https://civisanalytics.github.io/civis-r/). The full documentation also -includes a set of `Articles` for detailed documentation on common workflows, including -[data manipulation](https://civisanalytics.github.io/civis-r/articles/data_import_and_export.html) and [building models in parallel](https://civisanalytics.github.io/civis-r/articles/concurrency.html). \ No newline at end of file diff --git a/doc/quick_start.html b/doc/quick_start.html deleted file mode 100644 index d21c4c2d..00000000 --- a/doc/quick_start.html +++ /dev/null @@ -1,441 +0,0 @@ - - - - - - - - - - - - - - - -Getting Started - - - - - - - - - - - - - - - - - - - - - - - - - - -

- - - - - - - - - - - From 2ab20555648c8e152e87dde852511b68b38346a1 Mon Sep 17 00:00:00 2001 From: Matt Brennan <52160+mattyb@users.noreply.github.com> Date: Mon, 11 Dec 2023 09:58:02 -0500 Subject: [PATCH 22/22] remove cran-comments --- .gitignore | 1 + cran-comments.md | 17 ----------------- 2 files changed, 1 insertion(+), 17 deletions(-) delete mode 100644 cran-comments.md diff --git a/.gitignore b/.gitignore index 18ed2900..40cc188b 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ civis.Rcheck/ doc/ Meta/ CRAN-SUBMISSION +cran-comments.md diff --git a/cran-comments.md b/cran-comments.md deleted file mode 100644 index 861f87e4..00000000 --- a/cran-comments.md +++ /dev/null @@ -1,17 +0,0 @@ -## R CMD check results - -0 errors | 2 warnings | 0 notes - -* This is a new release. - -* checking whether package ‘civis’ can be installed ... WARNING - * `default_credential` masks `civis::default_credential()`. - * `get_database_id` masks `civis::get_database_id()`. - * `sql` masks `civis::sql()`. - - --> the civis:: functions overwrite themselves. There are no actual conflicts. - -* checking top-level files ... WARNING - * A complete check needs the 'checkbashisms' script. - - --> I believe this Warning only appears because I am running the checks on my local system. I did not see this Warning when I ran `devtools::check_rhub()` or `devtools::check_win_release()` \ No newline at end of file