MAPREDUCE-7421. [JDK17] Upgrade Junit 4 to 5 in hadoop-mapreduce-client-jobclient Part2. #7372


Merged: 4 commits, Feb 14, 2025
@@ -140,8 +140,8 @@ private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds,
assertTrue(fs.exists(outDir), "Job output directory doesn't exit!");
FileStatus[] list = fs.listStatus(outDir, new OutputFilter());
int numPartFiles = numReds == 0 ? numMaps : numReds;
-assertTrue(list.length == numPartFiles, "Number of part-files is " + list.length + " and not "
-    + numPartFiles);
+assertTrue(list.length == numPartFiles,
+    "Number of part-files is " + list.length + " and not " + numPartFiles);
return job;
}

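This first hunk only re-wraps an assertion that already uses the JUnit 5 argument order; the rest of the PR applies the underlying rule, namely that org.junit.Assert takes the failure message as the first argument while org.junit.jupiter.api.Assertions takes it as the last. A minimal sketch of that rule, using a hypothetical test class rather than code from this patch:

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class MessageOrderExample {
  @Test
  public void testMessageIsLastArgument() {
    int expected = 4;
    int actual = 2 + 2;
    // JUnit 4 style was: assertEquals("sums should match", expected, actual);
    assertEquals(expected, actual, "sums should match");
    // JUnit 4 style was: assertTrue("actual should be positive", actual > 0);
    assertTrue(actual > 0, "actual should be positive");
  }
}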
@@ -27,10 +27,10 @@
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;

public class TestClientProtocolProviderImpls {

@@ -91,9 +91,9 @@ public void testClusterExceptionRootCause() throws Exception {
fail("Cluster init should fail because of non-existing FileSystem");
} catch (IOException ioEx) {
final String stackTrace = StringUtils.stringifyException(ioEx);
assertTrue("No root cause detected",
stackTrace.contains(UnsupportedFileSystemException.class.getName())
&& stackTrace.contains("nosuchfs"));
assertTrue(stackTrace.contains(
UnsupportedFileSystemException.class.getName()) && stackTrace.contains("nosuchfs"),
"No root cause detected");
}
}
}
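This file keeps the existing try/fail/catch structure and only reorders the assertTrue arguments. As an aside (not part of this change), JUnit 5 also offers Assertions.assertThrows, which can express the same expectation more directly; a hypothetical sketch:

import java.io.IOException;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class AssertThrowsExample {
  @Test
  public void testRootCauseIsReported() {
    // assertThrows returns the caught exception, so its message can be inspected afterwards.
    IOException ex = assertThrows(IOException.class, () -> {
      throw new IOException("No FileSystem for scheme: nosuchfs");
    });
    assertTrue(ex.getMessage().contains("nosuchfs"), "No root cause detected");
  }
}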
@@ -23,9 +23,12 @@
import org.apache.hadoop.mapreduce.counters.Limits;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
-import static org.junit.Assert.*;
/**
* TestCounters checks the sanity and recoverability of {@code Counters}
*/
@@ -46,19 +49,19 @@ public void testCounterValue() {
long expectedValue = initValue;
Counter counter = new Counters().findCounter("test", "foo");
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",
expectedValue, counter.getValue());
assertEquals(expectedValue, counter.getValue(),
"Counter value is not initialized correctly");
for (int j = 0; j < NUMBER_INC; j++) {
int incValue = rand.nextInt();
counter.increment(incValue);
expectedValue += incValue;
assertEquals("Counter value is not incremented correctly",
expectedValue, counter.getValue());
assertEquals(expectedValue, counter.getValue(),
"Counter value is not incremented correctly");
}
expectedValue = rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",
expectedValue, counter.getValue());
assertEquals(expectedValue, counter.getValue(),
"Counter value is not set correctly");
}
}

@@ -148,6 +151,6 @@ private void shouldThrow(Class<? extends Exception> ecls, Runnable runnable) {
LOG.info("got expected: "+ e);
return;
}
assertTrue("Should've thrown "+ ecls.getSimpleName(), false);
assertTrue(false, "Should've thrown "+ ecls.getSimpleName());
}
}
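The shouldThrow helper above keeps assertTrue(false, ...) and only swaps the argument order. For reference (not something this patch changes), org.junit.jupiter.api.Assertions.fail serves the same purpose of marking a branch that must never be reached; a hypothetical sketch:

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.fail;

public class FailExample {
  @Test
  public void testExpectedExceptionWasThrown() {
    if (!throwsOnBadInput()) {
      // Equivalent to assertTrue(false, "..."): always fails with the given message.
      fail("Should've thrown NumberFormatException");
    }
  }

  private boolean throwsOnBadInput() {
    try {
      Integer.parseInt("not-a-number");
      return false;
    } catch (NumberFormatException e) {
      return true;
    }
  }
}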
@@ -23,25 +23,25 @@
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;

import java.io.IOException;

-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;

public class TestLargeSort {
MiniMRClientCluster cluster;

-@Before
+@BeforeEach
public void setup() throws IOException {
Configuration conf = new YarnConfiguration();
cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
cluster.start();
}

-@After
+@AfterEach
public void cleanup() throws IOException {
if (cluster != null) {
cluster.stop();
@@ -59,8 +59,8 @@ public void testLargeSort() throws Exception {
conf.setInt(MRJobConfig.IO_SORT_MB, ioSortMb);
conf.setInt(LargeSorter.NUM_MAP_TASKS, 1);
conf.setInt(LargeSorter.MBS_PER_MAP, ioSortMb);
assertEquals("Large sort failed for " + ioSortMb, 0,
ToolRunner.run(conf, new LargeSorter(), args));
assertEquals(0, ToolRunner.run(conf, new LargeSorter(), args),
"Large sort failed for " + ioSortMb);
}
}
}
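TestLargeSort also swaps the JUnit 4 lifecycle annotations for their JUnit 5 equivalents: @Before becomes @BeforeEach and @After becomes @AfterEach, both still running around every test method. A hypothetical sketch of the mapping, not taken from this patch:

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

public class LifecycleExample {
  private StringBuilder buffer;

  @BeforeEach  // JUnit 4: @Before
  public void setup() {
    buffer = new StringBuilder("ready");
  }

  @AfterEach   // JUnit 4: @After
  public void cleanup() {
    buffer = null;
  }

  @Test
  public void testSetupRanFirst() {
    assertEquals("ready", buffer.toString(), "setup should run before each test");
  }
}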
@@ -30,7 +30,8 @@
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -44,10 +45,10 @@
import java.util.ArrayList;
import java.util.List;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;

/**
* Stress tests for the LocalJobRunner
@@ -235,9 +236,9 @@ private void verifyOutput(Path outputPath) throws IOException {

// Should get a single line of the form "0\t(count)"
String line = r.readLine().trim();
assertTrue("Line does not have correct key", line.startsWith("0\t"));
assertTrue(line.startsWith("0\t"), "Line does not have correct key");
int count = Integer.valueOf(line.substring(2));
assertEquals("Incorrect count generated!", TOTAL_RECORDS, count);
assertEquals(TOTAL_RECORDS, count, "Incorrect count generated!");

r.close();

@@ -276,23 +277,24 @@ public void testGcCounter() throws Exception {
FileOutputFormat.setOutputPath(job, outputPath);

boolean ret = job.waitForCompletion(true);
assertTrue("job failed", ret);
assertTrue(ret, "job failed");

// This job should have done *some* gc work.
// It had to clean up 400,000 objects.
// We strongly suspect this will result in a few milliseconds effort.
Counter gcCounter = job.getCounters().findCounter(
TaskCounter.GC_TIME_MILLIS);
assertNotNull(gcCounter);
assertTrue("No time spent in gc", gcCounter.getValue() > 0);
assertTrue(gcCounter.getValue() > 0, "No time spent in gc");
}


/**
* Run a test with several mappers in parallel, operating at different
* speeds. Verify that the correct amount of output is created.
*/
-@Test(timeout=120*1000)
+@Test
+@Timeout(value=120)
public void testMultiMaps() throws Exception {
Job job = Job.getInstance();

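The timeout conversion above is worth noting: JUnit 4's @Test(timeout=...) is specified in milliseconds, while JUnit 5's @Timeout defaults to seconds, so timeout=120*1000 becomes @Timeout(value=120) with the same two-minute budget. A hypothetical sketch showing the default and an explicit unit:

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TimeoutExample {
  // JUnit 4 equivalent: @Test(timeout = 120 * 1000), where the value is in milliseconds.
  @Test
  @Timeout(value = 120)  // @Timeout defaults to TimeUnit.SECONDS
  public void testCompletesWithinTwoMinutes() throws InterruptedException {
    Thread.sleep(10);  // trivial stand-in for real work
  }

  // The unit can also be spelled out explicitly.
  @Test
  @Timeout(value = 2, unit = TimeUnit.MINUTES)
  public void testWithExplicitUnit() {
  }
}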
@@ -377,7 +379,7 @@ public void testInvalidMultiMapParallelism() throws Exception {
FileOutputFormat.setOutputPath(job, outputPath);

boolean success = job.waitForCompletion(true);
assertFalse("Job succeeded somehow", success);
assertFalse(success, "Job succeeded somehow");
}

/** An IF that creates no splits */
@@ -434,7 +436,7 @@ public void testEmptyMaps() throws Exception {
FileOutputFormat.setOutputPath(job, outputPath);

boolean success = job.waitForCompletion(true);
assertTrue("Empty job should work", success);
assertTrue(success, "Empty job should work");
}

/** @return the directory where numberfiles are written (mapper inputs) */
@@ -510,7 +512,7 @@ private void verifyNumberJob(int numMaps) throws Exception {
int expectedPerMapper = maxVal * (maxVal + 1) / 2;
int expectedSum = expectedPerMapper * numMaps;
LOG.info("expected sum: " + expectedSum + ", got " + valueSum);
assertEquals("Didn't get all our results back", expectedSum, valueSum);
assertEquals(expectedSum, valueSum, "Didn't get all our results back");
}

/**
@@ -551,7 +553,7 @@ private void doMultiReducerTest(int numMaps, int numReduces,
LocalJobRunner.setLocalMaxRunningReduces(job, parallelReduces);

boolean result = job.waitForCompletion(true);
assertTrue("Job failed!!", result);
assertTrue(result, "Job failed!!");

verifyNumberJob(numMaps);
}