Calls the 'Google Vision' API to return annotations. The function automatically creates batches of images for the annotation requests.
get_annotations(images, features, max_res, mode)
images: A character vector of images to be annotated. Can be either URL strings or paths to local images, as specified with mode.
features: A character vector of the features to be returned. Accepts 'all' or any combination of the following inputs: 'label', 'web', 'text', 'face', 'landmark', 'logo', 'safe_search', 'object', 'properties'.
max_res: An integer specifying the maximum number of results to be returned for each feature.
mode: Accepts 'url' for image URLs and 'local' for file paths to local images.
Returns a response object of class 'gvision_annotations' (see the sketch after the examples below for inspecting it).
if (FALSE) {
gvision_init() # initialize access to the Google Vision API
# one image url
sw_image <- 'https://upload.wikimedia.org/wikipedia/en/4/40/Star_Wars_Phantom_Menace_poster.jpg'
results <- get_annotations(images = sw_image, # image character vector
                           features = 'all', # request all available features
                           max_res = 10, # maximum number of results per feature
                           mode = 'url') # images are passed as urls
# multiple image urls
finn_image <- 'https://upload.wikimedia.org/wikipedia/en/2/2a/Finn-Force_Awakens_%282015%29.png'
padme_image <- 'https://upload.wikimedia.org/wikipedia/en/e/ee/Amidala.png'
input_imgs <- c(sw_image, finn_image, padme_image)
results <- get_annotations(images = input_imgs,
features = c('label', 'face'), max_res = 5, mode = 'url')
# one local image
temp_img_path <- tempfile(fileext = '.png')
download.file(finn_image, temp_img_path, mode = 'wb', quiet = TRUE)
results <- get_annotations(images = temp_img_path,
features = c('label', 'face'), max_res = 5, mode = 'local')
}
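The examples above store the API response in results. As a minimal sketch, the returned 'gvision_annotations' object can be inspected with base R as shown below; the parse_annotations() call is an assumption about a package-level parser and is therefore left commented out.

if (FALSE) {
# inspect the returned object (assumes 'results' from the examples above)
class(results) # "gvision_annotations"
str(results, max.level = 2) # top-level structure of the stored annotations
# parsed <- parse_annotations(results) # assumed helper that flattens annotations into data frames
}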