diff --git a/bigquery/api/async_query_test.py b/bigquery/api/async_query_test.py
index 005b1b0d7f4..8aabadd0fed 100644
--- a/bigquery/api/async_query_test.py
+++ b/bigquery/api/async_query_test.py
@@ -14,25 +14,21 @@
 import json
 
 from async_query import main
-import testing
 
 
-class TestAsyncQuery(testing.CloudTest):
+def test_async_query(cloud_config, capsys):
+    query = (
+        'SELECT corpus FROM publicdata:samples.shakespeare '
+        'GROUP BY corpus;')
 
-    def test_async_query(self):
-        query = (
-            'SELECT corpus FROM publicdata:samples.shakespeare '
-            'GROUP BY corpus;')
+    main(
+        project_id=cloud_config.GCLOUD_PROJECT,
+        query_string=query,
+        batch=False,
+        num_retries=5,
+        interval=1)
 
-        with testing.capture_stdout() as stdout:
-            main(
-                project_id=self.config.GCLOUD_PROJECT,
-                query_string=query,
-                batch=False,
-                num_retries=5,
-                interval=1)
+    out, _ = capsys.readouterr()
+    value = out.strip().split('\n').pop()
 
-        value = stdout.getvalue().strip().split('\n').pop()
-
-        self.assertIsNotNone(
-            json.loads(value))
+    assert json.loads(value) is not None
diff --git a/bigquery/api/export_data_to_cloud_storage_test.py b/bigquery/api/export_data_to_cloud_storage_test.py
index 77820bb2792..86204fc05f5 100644
--- a/bigquery/api/export_data_to_cloud_storage_test.py
+++ b/bigquery/api/export_data_to_cloud_storage_test.py
@@ -12,47 +12,49 @@
 # limitations under the License.
 
 from export_data_to_cloud_storage import main
-import pytest
-from testing import CloudTest
-
-
-@pytest.mark.slow
-class TestExportTableToGCS(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_table'
-
-    def test_export_table_csv(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.csv'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="CSV")
-
-    def test_export_table_json(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.json'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="NEWLINE_DELIMITED_JSON")
-
-    def test_export_table_avro(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.avro'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="AVRO")
+from testing import mark_flaky
+
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_table'
+
+
+@mark_flaky
+def test_export_table_csv(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.csv'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="CSV")
+
+
+@mark_flaky
+def test_export_table_json(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.json'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="NEWLINE_DELIMITED_JSON")
+
+
+@mark_flaky
+def test_export_table_avro(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.avro'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="AVRO")
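The rewritten export tests swap `@pytest.mark.slow` for `@mark_flaky`, which the diff imports from `testing` but never defines. A minimal sketch of what such a helper might look like, assuming the `flaky` pytest plugin is installed (the retry counts here are assumptions, not taken from this change):

```python
# testing/__init__.py -- hypothetical sketch, not part of this diff.
from flaky import flaky

# Re-run a failing test up to 3 times and accept a single pass; useful
# for tests that call live Cloud APIs and can fail transiently.
mark_flaky = flaky(max_runs=3, min_passes=1)
```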
diff --git a/bigquery/api/getting_started_test.py b/bigquery/api/getting_started_test.py
index 74114db8f5e..d0ff01b71e0 100644
--- a/bigquery/api/getting_started_test.py
+++ b/bigquery/api/getting_started_test.py
@@ -14,14 +14,12 @@
 import re
 
 from getting_started import main
-import testing
 
 
-class TestGettingStarted(testing.CloudTest):
-    def test_main(self):
-        with testing.capture_stdout() as mock_stdout:
-            main(self.config.GCLOUD_PROJECT)
+def test_main(cloud_config, capsys):
+    main(cloud_config.GCLOUD_PROJECT)
 
-        stdout = mock_stdout.getvalue()
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Query Results:.hamlet', re.DOTALL))
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Query Results:.hamlet', re.DOTALL), out)
diff --git a/bigquery/api/list_datasets_projects_test.py b/bigquery/api/list_datasets_projects_test.py
index 09d4ad52d51..588086807b0 100644
--- a/bigquery/api/list_datasets_projects_test.py
+++ b/bigquery/api/list_datasets_projects_test.py
@@ -14,18 +14,14 @@
 import re
 
 from list_datasets_projects import main
-import testing
 
 
-class TestListDatasetsProjects(testing.CloudTest):
+def test_main(cloud_config, capsys):
+    main(cloud_config.GCLOUD_PROJECT)
 
-    def test_main(self):
-        with testing.capture_stdout() as mock_stdout:
-            main(self.config.GCLOUD_PROJECT)
+    out, _ = capsys.readouterr()
 
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Project list:.*bigquery#projectList.*projects', re.DOTALL))
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Dataset list:.*datasets.*datasetId', re.DOTALL))
+    assert re.search(re.compile(
+        r'Project list:.*bigquery#projectList.*projects', re.DOTALL), out)
+    assert re.search(re.compile(
+        r'Dataset list:.*datasets.*datasetId', re.DOTALL), out)
diff --git a/bigquery/api/load_data_by_post_test.py b/bigquery/api/load_data_by_post_test.py
index d88c7fcf0e7..876c8748c9f 100644
--- a/bigquery/api/load_data_by_post_test.py
+++ b/bigquery/api/load_data_by_post_test.py
@@ -14,44 +14,45 @@
 import re
 
 from load_data_by_post import load_data
-import pytest
-import testing
-
-
-@pytest.mark.slow
-class TestLoadDataByPost(testing.CloudTest):
-    dataset_id = 'ephemeral_test_dataset'
-    table_id = 'load_data_by_post'
-
-    def test_load_csv_data(self):
-        schema_path = self.resource_path('schema.json')
-        data_path = self.resource_path('data.csv')
-        with testing.capture_stdout() as mock_stdout:
-            load_data(schema_path,
-                      data_path,
-                      self.config.GCLOUD_PROJECT,
-                      self.dataset_id,
-                      self.table_id
-                      )
-
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Waiting for job to finish.*Job complete.', re.DOTALL))
-
-    def test_load_json_data(self):
-        schema_path = self.resource_path('schema.json')
-        data_path = self.resource_path('data.json')
-
-        with testing.capture_stdout() as mock_stdout:
-            load_data(schema_path,
-                      data_path,
-                      self.config.GCLOUD_PROJECT,
-                      self.dataset_id,
-                      self.table_id
-                      )
-
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Waiting for job to finish.*Job complete.', re.DOTALL))
+from testing import mark_flaky
+
+DATASET_ID = 'ephemeral_test_dataset'
+TABLE_ID = 'load_data_by_post'
+
+
+@mark_flaky
+def test_load_csv_data(cloud_config, resource, capsys):
+    schema_path = resource('schema.json')
+    data_path = resource('data.csv')
+
+    load_data(
+        schema_path,
+        data_path,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID
+    )
+
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Waiting for job to finish.*Job complete.', re.DOTALL), out)
+
+
+@mark_flaky
+def test_load_json_data(cloud_config, resource, capsys):
+    schema_path = resource('schema.json')
+    data_path = resource('data.json')
+
+    load_data(
+        schema_path,
+        data_path,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID
+    )
+
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Waiting for job to finish.*Job complete.', re.DOTALL), out)
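Every migrated test receives a `cloud_config` fixture in place of `self.config`, and `load_data_by_post_test.py` above also takes a `resource` fixture in place of `self.resource_path`. Neither fixture is defined in this diff; a plausible `conftest.py` sketch follows (only the fixture names and the `GCLOUD_PROJECT`/`CLOUD_STORAGE_BUCKET` attributes come from the tests — the environment-variable plumbing and the `resources` directory layout are assumptions):

```python
# conftest.py -- hypothetical sketch of the fixtures the tests consume.
import collections
import os

import pytest

CloudConfig = collections.namedtuple(
    'CloudConfig', ['GCLOUD_PROJECT', 'CLOUD_STORAGE_BUCKET'])


@pytest.fixture(scope='session')
def cloud_config():
    """Project-level settings previously exposed as CloudTest.config."""
    return CloudConfig(
        GCLOUD_PROJECT=os.environ['GCLOUD_PROJECT'],
        CLOUD_STORAGE_BUCKET=os.environ['CLOUD_STORAGE_BUCKET'])


@pytest.fixture
def resource(request):
    """Resolves file names relative to the requesting test module,
    replacing the old CloudTest.resource_path helper."""
    def _resource(filename):
        return os.path.join(
            os.path.dirname(request.module.__file__), 'resources', filename)
    return _resource
```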
diff --git a/bigquery/api/load_data_from_csv_test.py b/bigquery/api/load_data_from_csv_test.py
index 3bb989a6fce..0cc64adb423 100644
--- a/bigquery/api/load_data_from_csv_test.py
+++ b/bigquery/api/load_data_from_csv_test.py
@@ -13,25 +13,24 @@
 
 from load_data_from_csv import main
-import pytest
-from testing import CloudTest
+from testing import mark_flaky
 
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_import_table'
 
-@pytest.mark.slow
-class TestLoadDataFromCSV(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_import_table'
 
-    def test_load_table(self):
-        cloud_storage_input_uri = 'gs://{}/data.csv'.format(
-            self.config.CLOUD_STORAGE_BUCKET)
-        schema_file = self.resource_path('schema.json')
+@mark_flaky
+def test_load_table(cloud_config, resource):
+    cloud_storage_input_uri = 'gs://{}/data.csv'.format(
+        cloud_config.CLOUD_STORAGE_BUCKET)
+    schema_file = resource('schema.json')
 
-        main(
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            schema_file=schema_file,
-            data_path=cloud_storage_input_uri,
-            poll_interval=1,
-            num_retries=5)
+    main(
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        schema_file=schema_file,
+        data_path=cloud_storage_input_uri,
+        poll_interval=1,
+        num_retries=5
+    )
diff --git a/bigquery/api/streaming_test.py b/bigquery/api/streaming_test.py
index 44751e47727..de22c58d202 100644
--- a/bigquery/api/streaming_test.py
+++ b/bigquery/api/streaming_test.py
@@ -14,28 +14,25 @@
 import json
 
 import streaming
-from testing import capture_stdout, CloudTest
 
 
-class TestStreaming(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_table'
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_table'
 
-    def test_stream_row_to_bigquery(self):
-        with open(
-                self.resource_path('streamrows.json'),
-                'r') as rows_file:
-            rows = json.load(rows_file)
+def test_stream_row_to_bigquery(cloud_config, resource, capsys):
+    with open(resource('streamrows.json'), 'r') as rows_file:
+        rows = json.load(rows_file)
 
-        streaming.get_rows = lambda: rows
+    streaming.get_rows = lambda: rows
 
-        with capture_stdout() as stdout:
-            streaming.main(
-                self.config.GCLOUD_PROJECT,
-                self.dataset_id,
-                self.table_id,
-                num_retries=5)
+    streaming.main(
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5)
 
-        results = stdout.getvalue().split('\n')
-        self.assertIsNotNone(json.loads(results[0]))
+    out, _ = capsys.readouterr()
+    results = out.split('\n')
+
+    assert json.loads(results[0]) is not None
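One behavioral caveat in `streaming_test.py` above: `streaming.get_rows = lambda: rows` rebinds the module attribute for the rest of the session, since nothing restores the original function. If that ever leaks into other tests, pytest's built-in `monkeypatch` fixture undoes the patch automatically at teardown. A sketch of the same test using it (only the extra fixture is new; everything else mirrors the diff):

```python
def test_stream_row_to_bigquery(cloud_config, resource, capsys, monkeypatch):
    with open(resource('streamrows.json'), 'r') as rows_file:
        rows = json.load(rows_file)

    # monkeypatch restores the real streaming.get_rows after this test.
    monkeypatch.setattr(streaming, 'get_rows', lambda: rows)

    streaming.main(
        cloud_config.GCLOUD_PROJECT,
        DATASET_ID,
        TABLE_ID,
        num_retries=5)

    out, _ = capsys.readouterr()
    assert json.loads(out.split('\n')[0]) is not None
```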
diff --git a/bigquery/api/sync_query_test.py b/bigquery/api/sync_query_test.py
index c39f6af0abc..7f1d196f2b6 100644
--- a/bigquery/api/sync_query_test.py
+++ b/bigquery/api/sync_query_test.py
@@ -14,22 +14,20 @@
 import json
 
 from sync_query import main
-from testing import capture_stdout, CloudTest
 
 
-class TestSyncQuery(CloudTest):
+def test_sync_query(cloud_config, capsys):
+    query = (
+        'SELECT corpus FROM publicdata:samples.shakespeare '
+        'GROUP BY corpus;')
 
-    def test_sync_query(self):
-        query = (
-            'SELECT corpus FROM publicdata:samples.shakespeare '
-            'GROUP BY corpus;')
+    main(
+        project_id=cloud_config.GCLOUD_PROJECT,
+        query=query,
+        timeout=30,
+        num_retries=5)
 
-        with capture_stdout() as stdout:
-            main(
-                project_id=self.config.GCLOUD_PROJECT,
-                query=query,
-                timeout=30,
-                num_retries=5)
+    out, _ = capsys.readouterr()
+    result = out.split('\n')[0]
 
-        result = stdout.getvalue().split('\n')[0]
-        self.assertIsNotNone(json.loads(result))
+    assert json.loads(result) is not None
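A final pattern note: several of the rewritten assertions keep an explicit `re.compile(...)` inside `re.search`. That is carried over from `assertRegexpMatches`, but `re.search` also accepts flags directly, so the compile step could be dropped. For example, the `getting_started_test.py` assertion could read (a sketch, not part of this change):

```python
import re

from getting_started import main


def test_main(cloud_config, capsys):
    main(cloud_config.GCLOUD_PROJECT)
    out, _ = capsys.readouterr()

    # Passing re.DOTALL as the flags argument is equivalent to wrapping
    # the pattern in re.compile(..., re.DOTALL) as done in the diff.
    assert re.search(r'Query Results:.hamlet', out, re.DOTALL)
```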