Skip to content

Instantly share code, notes, and snippets.

@neilkimmett
Created July 15, 2020 17:13
Show Gist options
  • Save neilkimmett/e196a9c9b5a7619bd8dd1657bb0f4496 to your computer and use it in GitHub Desktop.
google-api-python-client
google-auth-httplib2
google-auth-oauthlib
python-dotenv
boto3
import pickle
import io
from dotenv import load_dotenv
import os
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import sys
import requests
import json
import argparse
import boto3
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive']
# Load Brightcove credentials from a local .env file into the environment.
load_dotenv()
# Brightcove API credentials; any of these may be None if the .env file or
# environment variables are missing — TODO confirm callers fail clearly then.
pub_id = os.environ.get('CP_BRIGHTCOVE_API_ACCOUNT_ID')
client_id = os.environ.get('CP_BRIGHTCOVE_API_KEY')
client_secret = os.environ.get('CP_BRIGHTCOVE_API_SECRET')
# Brightcove OAuth token endpoint used by get_authorization_headers().
access_token_url = "https://oauth.brightcove.com/v3/access_token"
def get_file_from_gdrive(file_id='1ZKybyhkFv3I6tIloDRq4ncy3iAPet5qs'):
    """Authenticate against the Drive v3 API and download one file into memory.

    Uses the OAuth installed-app flow, caching credentials in token.pickle so
    the browser consent step only happens on the first run.

    Args:
        file_id: Drive file id to download. Defaults to the original
            hard-coded id for backward compatibility.

    Returns:
        io.BytesIO: buffer holding the file's bytes. The stream position is
        left at the end of the buffer; callers should seek(0) before reading.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and
    # is created automatically when the authorization flow completes for the
    # first time. NOTE(review): pickle is unsafe to load from untrusted
    # sources; token.pickle must only ever be written by this script.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('drive', 'v3', credentials=creds)
    # Download in chunks so large files don't need a single huge request.
    request = service.files().get_media(fileId=file_id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        print("Downloading file from GDrive %d%%. ..." % int(status.progress() * 100))
    return fh
def save_file(fh, filename='your_filename.mov'):
    """Write the contents of an in-memory buffer to a file on disk.

    Args:
        fh: seekable binary buffer (e.g. io.BytesIO) holding the data.
        filename: destination path. Defaults to the original hard-coded
            name so existing callers are unaffected.
    """
    fh.seek(0)
    with open(filename, 'wb') as f:
        f.write(fh.getvalue())
# Making requests with the Brightcove CMS API requires the use of OAuth.
def get_authorization_headers():
    """Obtain an OAuth access token from Brightcove and return it embedded
    in a headers dict suitable for the requests HTTP library.

    Returns:
        dict: 'Authorization' (Bearer token) and 'Content-Type' headers.

    Raises:
        RuntimeError: if the token request does not return HTTP 200.
    """
    print("Requesting auth token from Brightcove...")
    # SECURITY FIX: the original passed verify=False, disabling TLS
    # certificate verification while transmitting client credentials.
    r = requests.post(access_token_url,
                      params="grant_type=client_credentials",
                      auth=(client_id, client_secret))
    if r.status_code != 200:
        # Fail loudly: the original fell through and built 'Bearer ' + None,
        # which raised an opaque TypeError.
        raise RuntimeError(
            f"Brightcove auth failed with HTTP {r.status_code}: {r.text}")
    access_token = r.json().get('access_token')
    print("✅ Got auth token from Brightcove")
    return { 'Authorization': 'Bearer ' + access_token, "Content-Type": "application/json" }
# create_video makes the CMS API call to create a video in the VideoCloud
# catalog. This example demonstrates setting only the 'name' attribute on
# the created title.
def create_video():
    """Create a placeholder video object via the Brightcove CMS API.

    Returns:
        dict: the created video object (including its 'id'), as returned
        by the CMS API.

    Exits the process with status 1 if the API reports an error.
    """
    print("Creating video in VideoCloud CMS API..")
    url = f"https://cms.api.brightcove.com/v1/accounts/{pub_id}/videos/"
    # Serialize the body with json.dumps instead of a hand-written literal.
    data = json.dumps({"name": "test script upload"})
    r = requests.post(url, headers=get_authorization_headers(), data=data)
    # BUG FIX: the CMS API answers 201 Created for a new video; the original
    # only accepted 200 and treated every successful creation as an error.
    if r.status_code in (200, 201):
        print("✅ Finished Creating video in VideoCloud CMS API")
        return r.json()
    else:
        print(f"❌ Got error from VideoCloud API: {r.json()[0]['message']}")
        sys.exit(1)
# get_upload_location_and_upload_file first performs an authenticated request
# to discover a Brightcove-provided location to securely upload a source file.
def get_upload_location_and_upload_file(account_id, video_id, fh):
    """Request a temporary S3 upload location from Brightcove, then upload
    the source file there using boto3.

    Args:
        account_id: Brightcove publisher/account id.
        video_id: id of the video created by create_video().
        fh: seekable binary buffer holding the source file.

    Returns:
        dict: the upload-urls response (its 'api_request_url' is consumed by
        the subsequent Dynamic Ingest request).
    """
    print("Requesting S3 file upload location from Brightcove...")
    # Perform an authorized request to obtain a file upload location.
    # FIX: use the account_id parameter — the original ignored it and read
    # the module-level pub_id instead (the only caller passes pub_id, so
    # behaviour is unchanged).
    url = (f"https://cms.api.brightcove.com/v1/accounts/{account_id}"
           f"/videos/{video_id}/upload-urls/test-script-upload")
    r = requests.get(url, headers=get_authorization_headers())
    upload_urls_response = r.json()
    print("✅ Got S3 bucket to upload video.")
    print("Uploading video into S3 bucket..")
    # Upload the contents of our local file to the location provided to us.
    # boto3 performs a managed multipart upload, the recommended method for
    # large source files.
    fh.seek(0)
    s3 = boto3.resource(
        's3',
        aws_access_key_id=upload_urls_response['access_key_id'],
        aws_secret_access_key=upload_urls_response['secret_access_key'],
        aws_session_token=upload_urls_response['session_token'])
    # BUG FIX: s3.Object has no .fileobj() method — the original crashed
    # here with AttributeError; upload_fileobj() is the correct boto3 call.
    s3.Object(upload_urls_response['bucket'],
              upload_urls_response['object_key']).upload_fileobj(fh)
    print("✅ Uploaded video!")
    return upload_urls_response
# di_request makes the Ingest API call to populate a video with transcoded
# renditions from the source file that was uploaded in the previous step.
def di_request(video_id, upload_urls_response):
    """Kick off a Brightcove Dynamic Ingest request for an uploaded source.

    Args:
        video_id: id of the target video in the VideoCloud catalog.
        upload_urls_response: dict returned by
            get_upload_location_and_upload_file(); its 'api_request_url'
            points at the uploaded source file.

    Returns:
        dict: the Ingest API response body.
    """
    print("Making Ingest API call to populate a video with transcoded renditions...")
    url = f"https://ingest.api.brightcove.com/v1/accounts/{pub_id}/videos/{video_id}/ingest-requests"
    # Build the JSON body with json.dumps — the original concatenated the URL
    # into a string literal, which breaks on any quote/backslash in the URL.
    data = json.dumps({"master": {"url": upload_urls_response['api_request_url']}})
    r = requests.post(url, headers=get_authorization_headers(), data=data)
    print("✅ Ingested video!")
    return r.json()
if __name__ == '__main__':
    # Pipeline: download from Google Drive -> create catalog entry ->
    # upload source to Brightcove's S3 -> trigger Dynamic Ingest.
    file_buffer = get_file_from_gdrive()  # renamed: 'file' shadows a builtin
    # save_file(file_buffer)  # uncomment to also keep a local copy
    v = create_video()
    print(v)
    upload_urls = get_upload_location_and_upload_file(pub_id, v['id'], file_buffer)
    # BUG FIX: di_request requires the upload-urls response as its second
    # argument; the original called di_request(v['id']) and raised TypeError.
    print(di_request(v['id'], upload_urls))
    print(upload_urls)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment