
Commit d2818ba

Merge pull request #44 from GoogleCloudPlatform/docstrings
Added doc strings to functions with ambiguous args
2 parents: 804f4a9 + a5b3bd8

File tree

4 files changed: +47 -0 lines changed


bigquery/samples/export_data_to_cloud_storage.py

Lines changed: 13 additions & 0 deletions
@@ -21,6 +21,19 @@
 def export_table(service, cloud_storage_path,
                  projectId, datasetId, tableId,
                  num_retries=5):
+    """
+    Starts an export job
+
+    Args:
+        service: initialized and authorized bigquery
+            google-api-client object,
+        cloud_storage_path: fully qualified
+            path to a Google Cloud Storage location,
+            e.g. gs://mybucket/myfolder/
+
+    Returns: an extract job resource representing the
+        job, see https://cloud.google.com/bigquery/docs/reference/v2/jobs
+    """
     # Generate a unique job_id so retries
     # don't accidentally duplicate export
     job_data = {
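
The docstring covers service and cloud_storage_path; the remaining ids follow from the signature. A minimal usage sketch of the documented call shape, assuming the sample modules are importable alongside utils.py (the project, dataset, table, and bucket names below are placeholders):

from utils import get_service, poll_job
from export_data_to_cloud_storage import export_table

service = get_service()
job = export_table(service,
                   'gs://mybucket/myfolder/export.json',  # placeholder path
                   projectId='my-project',                # placeholder ids
                   datasetId='my_dataset',
                   tableId='my_table')
# Per the docstring, job is an extract job resource; in the v2 jobs
# representation its id lives at jobReference.jobId.
poll_job(service, 'my-project', job['jobReference']['jobId'])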

bigquery/samples/load_data_by_post.py

Lines changed: 12 additions & 0 deletions
@@ -22,6 +22,18 @@
 
 # [START make_post]
 def make_post(http, schema, data, projectId, datasetId, tableId):
+    """
+    Creates an http POST request for loading data into
+    a bigquery table
+
+    Args:
+        http: an authorized httplib2 client,
+        schema: a valid bigquery schema,
+            see https://cloud.google.com/bigquery/docs/reference/v2/tables,
+        data: valid JSON to insert into the table
+
+    Returns: an http.request object
+    """
     url = ('https://www.googleapis.com/upload/bigquery/v2/projects/' +
            projectId + '/jobs')
     # Create the body of the request, separated by a boundary of xxx
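
The docstring leaves the return shape loose ("an http.request object"); with httplib2, Http.request returns a (response, content) pair, which is what the unpacking below assumes. A hedged sketch with toy schema and data — all ids are placeholders, and passing the schema as a JSON string is an assumption, not something the diff confirms:

import json

import httplib2
from oauth2client.client import GoogleCredentials

from load_data_by_post import make_post

# Same oauth2client pattern as get_service() in utils.py below.
credentials = GoogleCredentials.get_application_default().create_scoped(
    ['https://www.googleapis.com/auth/bigquery'])
http = credentials.authorize(httplib2.Http())

schema = json.dumps([{'name': 'word', 'type': 'STRING'}])  # toy schema
data = json.dumps({'word': 'hello'})  # one newline-delimited JSON row

# Assumes make_post returns httplib2's (response, content) pair.
response, content = make_post(http, schema, data,
                              projectId='my-project',
                              datasetId='my_dataset',
                              tableId='my_table')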

bigquery/samples/load_data_from_csv.py

Lines changed: 15 additions & 0 deletions
@@ -20,6 +20,21 @@
 # [START load_table]
 def load_table(service, source_schema, source_csv,
                projectId, datasetId, tableId, num_retries=5):
+    """
+    Starts a job to load a bigquery table from CSV
+
+    Args:
+        service: an initialized and authorized bigquery
+            google-api-client object
+        source_schema: a valid bigquery schema,
+            see https://cloud.google.com/bigquery/docs/reference/v2/tables
+        source_csv: the fully qualified Google Cloud Storage location of
+            the data to load into your table
+
+    Returns: a bigquery load job, see
+        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
+    """
+
     # Generate a unique job_id so retries
     # don't accidentally duplicate query
     job_data = {
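
Putting the three documented arguments together, a minimal sketch (the schema literal and all names are placeholder assumptions; the source_csv path follows the fully qualified gs:// form the docstring asks for):

from utils import get_service, poll_job
from load_data_from_csv import load_table

service = get_service()
source_schema = [{'name': 'word', 'type': 'STRING'},    # placeholder schema
                 {'name': 'count', 'type': 'INTEGER'}]
job = load_table(service, source_schema,
                 'gs://mybucket/myfolder/words.csv',    # placeholder source
                 projectId='my-project',
                 datasetId='my_dataset',
                 tableId='my_table')
# Wait on the load job the same way as in the export sketch above.
poll_job(service, 'my-project', job['jobReference']['jobId'])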

bigquery/samples/utils.py

Lines changed: 7 additions & 0 deletions
@@ -15,8 +15,11 @@
 
 # [START get_service]
 def get_service():
+    """returns an initialized and authorized bigquery client"""
+
     from googleapiclient.discovery import build
     from oauth2client.client import GoogleCredentials
+
     credentials = GoogleCredentials.get_application_default()
     if credentials.create_scoped_required():
         credentials = credentials.create_scoped(
@@ -27,6 +30,8 @@ def get_service():
 
 # [START poll_job]
 def poll_job(service, projectId, jobId, interval=5, num_retries=5):
+    """checks the status of a job every *interval* seconds"""
+
     import time
 
     job_get = service.jobs().get(projectId=projectId, jobId=jobId)
@@ -44,6 +49,8 @@ def poll_job(service, projectId, jobId, interval=5, num_retries=5):
 
 # [START paging]
 def paging(service, request_func, num_retries=5, **kwargs):
+    """pages through the results of an asynchronous job"""
+
     has_next = True
     while has_next:
         response = request_func(**kwargs).execute(num_retries=num_retries)
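
The while loop in paging suggests a generator that yields one response per page; under that assumption, any list-style API method whose request is built as request_func(**kwargs) can be passed in. A sketch using tabledata().list (all ids are placeholders):

from utils import get_service, paging

service = get_service()
# request_func is invoked as request_func(**kwargs).execute(...), so the
# list method itself is passed, not an already-prepared request.
for page in paging(service,
                   service.tabledata().list,
                   projectId='my-project',
                   datasetId='my_dataset',
                   tableId='my_table'):
    print(page)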
