Skip to content

Commit 19bd575

Browse files
slfan1989 and cnauroth authored
MAPREDUCE-7421. [JDK17] Upgrade Junit 4 to 5 in hadoop-mapreduce-client-jobclient Part1. (#7358)
Co-authored-by: Chris Nauroth <[email protected]> Reviewed-by: Chris Nauroth <[email protected]> Signed-off-by: Shilun Fan <[email protected]>
1 parent 14b7159 commit 19bd575

File tree

110 files changed

+1413
-1326
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

110 files changed

+1413
-1326
lines changed

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
import org.apache.hadoop.mapred.TextInputFormat;
3030
import org.apache.hadoop.mapred.TextOutputFormat;
3131
import org.apache.hadoop.mapred.Utils;
32-
import org.junit.Test;
32+
import org.junit.jupiter.api.Test;
3333

3434
import java.io.BufferedReader;
3535
import java.io.IOException;
@@ -39,8 +39,10 @@
3939
import java.io.OutputStreamWriter;
4040
import java.io.Writer;
4141

42-
import static org.junit.Assert.assertEquals;
43-
import static org.junit.Assert.assertTrue;
42+
import static org.junit.jupiter.api.Assertions.assertEquals;
43+
import static org.junit.jupiter.api.Assertions.assertNotNull;
44+
import static org.junit.jupiter.api.Assertions.assertNull;
45+
import static org.junit.jupiter.api.Assertions.assertTrue;
4446

4547
/**
4648
* This testcase tests that a JobConf without default values submits jobs
@@ -56,10 +58,10 @@ public TestNoDefaultsJobConf() throws IOException {
5658
@Test
5759
public void testNoDefaults() throws Exception {
5860
JobConf configuration = new JobConf();
59-
assertTrue(configuration.get("hadoop.tmp.dir", null) != null);
61+
assertNotNull(configuration.get("hadoop.tmp.dir", null));
6062

6163
configuration = new JobConf(false);
62-
assertTrue(configuration.get("hadoop.tmp.dir", null) == null);
64+
assertNull(configuration.get("hadoop.tmp.dir", null));
6365

6466

6567
Path inDir = new Path("testing/jobconf/input");

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DFSCIOTest.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,8 @@
3434
import org.apache.hadoop.io.Text;
3535
import org.apache.hadoop.io.SequenceFile.CompressionType;
3636
import org.apache.hadoop.mapred.*;
37-
import org.junit.Ignore;
38-
import org.junit.Test;
37+
import org.junit.jupiter.api.Disabled;
38+
import org.junit.jupiter.api.Test;
3939
import org.slf4j.Logger;
4040
import org.slf4j.LoggerFactory;
4141

@@ -66,7 +66,7 @@
6666
* <li>standard i/o rate deviation</li>
6767
* </ul>
6868
*/
69-
@Ignore
69+
@Disabled
7070
public class DFSCIOTest {
7171
// Constants
7272
private static final Logger LOG = LoggerFactory.getLogger(DFSCIOTest.class);

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -66,9 +66,10 @@
6666
import org.apache.hadoop.util.StringUtils;
6767
import org.apache.hadoop.util.Tool;
6868
import org.apache.hadoop.util.ToolRunner;
69-
import org.junit.AfterClass;
70-
import org.junit.BeforeClass;
71-
import org.junit.Test;
69+
import org.junit.jupiter.api.AfterAll;
70+
import org.junit.jupiter.api.BeforeAll;
71+
import org.junit.jupiter.api.Test;
72+
import org.junit.jupiter.api.Timeout;
7273
import org.slf4j.Logger;
7374
import org.slf4j.LoggerFactory;
7475

@@ -226,7 +227,7 @@ private static Path getDataDir(Configuration conf) {
226227
private static MiniDFSCluster cluster;
227228
private static TestDFSIO bench;
228229

229-
@BeforeClass
230+
@BeforeAll
230231
public static void beforeClass() throws Exception {
231232
bench = new TestDFSIO();
232233
bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -241,7 +242,7 @@ public static void beforeClass() throws Exception {
241242
testWrite();
242243
}
243244

244-
@AfterClass
245+
@AfterAll
245246
public static void afterClass() throws Exception {
246247
if(cluster == null)
247248
return;
@@ -256,45 +257,51 @@ public static void testWrite() throws Exception {
256257
bench.analyzeResult(fs, TestType.TEST_TYPE_WRITE, execTime);
257258
}
258259

259-
@Test (timeout = 10000)
260+
@Test
261+
@Timeout(value = 10)
260262
public void testRead() throws Exception {
261263
FileSystem fs = cluster.getFileSystem();
262264
long execTime = bench.readTest(fs);
263265
bench.analyzeResult(fs, TestType.TEST_TYPE_READ, execTime);
264266
}
265267

266-
@Test (timeout = 10000)
268+
@Test
269+
@Timeout(value = 10)
267270
public void testReadRandom() throws Exception {
268271
FileSystem fs = cluster.getFileSystem();
269272
bench.getConf().setLong("test.io.skip.size", 0);
270273
long execTime = bench.randomReadTest(fs);
271274
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_RANDOM, execTime);
272275
}
273276

274-
@Test (timeout = 10000)
277+
@Test
278+
@Timeout(value = 10)
275279
public void testReadBackward() throws Exception {
276280
FileSystem fs = cluster.getFileSystem();
277281
bench.getConf().setLong("test.io.skip.size", -DEFAULT_BUFFER_SIZE);
278282
long execTime = bench.randomReadTest(fs);
279283
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_BACKWARD, execTime);
280284
}
281285

282-
@Test (timeout = 10000)
286+
@Test
287+
@Timeout(value = 10)
283288
public void testReadSkip() throws Exception {
284289
FileSystem fs = cluster.getFileSystem();
285290
bench.getConf().setLong("test.io.skip.size", 1);
286291
long execTime = bench.randomReadTest(fs);
287292
bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime);
288293
}
289294

290-
@Test (timeout = 10000)
295+
@Test
296+
@Timeout(value = 10)
291297
public void testAppend() throws Exception {
292298
FileSystem fs = cluster.getFileSystem();
293299
long execTime = bench.appendTest(fs);
294300
bench.analyzeResult(fs, TestType.TEST_TYPE_APPEND, execTime);
295301
}
296302

297-
@Test (timeout = 60000)
303+
@Test
304+
@Timeout(value = 60)
298305
public void testTruncate() throws Exception {
299306
FileSystem fs = cluster.getFileSystem();
300307
bench.createControlFile(fs, DEFAULT_NR_BYTES / 2, DEFAULT_NR_FILES);

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -47,16 +47,16 @@
4747
import org.apache.hadoop.mapred.lib.LongSumReducer;
4848
import org.apache.hadoop.security.UserGroupInformation;
4949
import org.apache.hadoop.util.StringUtils;
50-
import org.junit.Test;
50+
import org.junit.jupiter.api.Test;
5151
import org.slf4j.Logger;
5252
import org.slf4j.LoggerFactory;
5353

5454
import static org.assertj.core.api.Assertions.assertThat;
55-
import static org.junit.Assert.assertTrue;
56-
import static org.junit.Assert.assertEquals;
57-
import static org.junit.Assert.assertNotSame;
58-
import static org.junit.Assert.assertFalse;
59-
import static org.junit.Assert.fail;
55+
import static org.junit.jupiter.api.Assertions.assertTrue;
56+
import static org.junit.jupiter.api.Assertions.assertEquals;
57+
import static org.junit.jupiter.api.Assertions.assertNotSame;
58+
import static org.junit.jupiter.api.Assertions.assertFalse;
59+
import static org.junit.jupiter.api.Assertions.fail;
6060

6161

6262
public class TestFileSystem {

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestJHLA.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
import java.io.OutputStreamWriter;
2424
import java.io.File;
2525

26-
import org.junit.After;
27-
import org.junit.Before;
28-
import org.junit.Test;
26+
import org.junit.jupiter.api.AfterEach;
27+
import org.junit.jupiter.api.BeforeEach;
28+
import org.junit.jupiter.api.Test;
2929
import org.slf4j.Logger;
3030
import org.slf4j.LoggerFactory;
3131

@@ -40,7 +40,7 @@ public class TestJHLA {
4040
private String historyLog = System.getProperty("test.build.data",
4141
"build/test/data") + "/history/test.log";
4242

43-
@Before
43+
@BeforeEach
4444
public void setUp() throws Exception {
4545
File logFile = new File(historyLog);
4646
if(!logFile.getParentFile().exists())
@@ -121,7 +121,7 @@ public void setUp() throws Exception {
121121
writer.close();
122122
}
123123

124-
@After
124+
@AfterEach
125125
public void tearDown() throws Exception {
126126
File logFile = new File(historyLog);
127127
if(!logFile.delete())

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TestSlive.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@
1919
package org.apache.hadoop.fs.slive;
2020

2121
import static org.assertj.core.api.Assertions.assertThat;
22-
import static org.junit.Assert.assertEquals;
23-
import static org.junit.Assert.assertTrue;
22+
import static org.junit.jupiter.api.Assertions.assertEquals;
23+
import static org.junit.jupiter.api.Assertions.assertTrue;
2424

2525
import java.io.DataInputStream;
2626
import java.io.File;
@@ -40,8 +40,8 @@
4040
import org.apache.hadoop.fs.slive.DataVerifier.VerifyOutput;
4141
import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
4242
import org.apache.hadoop.util.ToolRunner;
43-
import org.junit.Before;
44-
import org.junit.Test;
43+
import org.junit.jupiter.api.BeforeEach;
44+
import org.junit.jupiter.api.Test;
4545
import org.slf4j.Logger;
4646
import org.slf4j.LoggerFactory;
4747

@@ -194,7 +194,7 @@ private ConfigExtractor getTestConfig(boolean sleep) throws Exception {
194194
return extractor;
195195
}
196196

197-
@Before
197+
@BeforeEach
198198
public void ensureDeleted() throws Exception {
199199
rDelete(getTestFile());
200200
rDelete(getTestDir());

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/TestNNBench.java

Lines changed: 23 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,9 @@
1717
*/
1818
package org.apache.hadoop.hdfs;
1919

20-
import static org.junit.Assert.assertEquals;
21-
import static org.junit.Assert.assertFalse;
22-
import static org.junit.Assert.assertTrue;
20+
import static org.junit.jupiter.api.Assertions.assertEquals;
21+
import static org.junit.jupiter.api.Assertions.assertFalse;
22+
import static org.junit.jupiter.api.Assertions.assertTrue;
2323

2424
import java.io.File;
2525
import java.io.IOException;
@@ -31,8 +31,9 @@
3131
import org.apache.hadoop.mapred.JobConf;
3232
import org.apache.hadoop.util.Time;
3333
import org.apache.hadoop.util.ToolRunner;
34-
import org.junit.After;
35-
import org.junit.Test;
34+
import org.junit.jupiter.api.AfterEach;
35+
import org.junit.jupiter.api.Test;
36+
import org.junit.jupiter.api.Timeout;
3637

3738
public class TestNNBench extends HadoopTestCase {
3839
private static final String BASE_DIR =
@@ -45,39 +46,39 @@ public TestNNBench() throws IOException {
4546
super(LOCAL_MR, LOCAL_FS, 1, 1);
4647
}
4748

48-
@After
49+
@AfterEach
4950
public void tearDown() throws Exception {
5051
getFileSystem().delete(new Path(BASE_DIR), true);
5152
getFileSystem().delete(new Path(NNBench.DEFAULT_RES_FILE_NAME), true);
5253
super.tearDown();
5354
}
5455

55-
@Test(timeout = 30000)
56+
@Test
57+
@Timeout(value = 30)
5658
public void testNNBenchCreateReadAndDelete() throws Exception {
5759
runNNBench(createJobConf(), "create_write");
5860
Path path = new Path(BASE_DIR + "/data/file_0_0");
59-
assertTrue("create_write should create the file",
60-
getFileSystem().exists(path));
61+
assertTrue(getFileSystem().exists(path), "create_write should create the file");
6162
runNNBench(createJobConf(), "open_read");
6263
runNNBench(createJobConf(), "delete");
63-
assertFalse("Delete operation should delete the file",
64-
getFileSystem().exists(path));
64+
assertFalse(getFileSystem().exists(path),
65+
"Delete operation should delete the file");
6566
}
6667

67-
@Test(timeout = 30000)
68+
@Test
69+
@Timeout(value = 30)
6870
public void testNNBenchCreateAndRename() throws Exception {
6971
runNNBench(createJobConf(), "create_write");
7072
Path path = new Path(BASE_DIR + "/data/file_0_0");
71-
assertTrue("create_write should create the file",
72-
getFileSystem().exists(path));
73+
assertTrue(getFileSystem().exists(path), "create_write should create the file");
7374
runNNBench(createJobConf(), "rename");
7475
Path renamedPath = new Path(BASE_DIR + "/data/file_0_r_0");
75-
assertFalse("Rename should rename the file", getFileSystem().exists(path));
76-
assertTrue("Rename should rename the file",
77-
getFileSystem().exists(renamedPath));
76+
assertFalse(getFileSystem().exists(path), "Rename should rename the file");
77+
assertTrue(getFileSystem().exists(renamedPath), "Rename should rename the file");
7878
}
7979

80-
@Test(timeout = 30000)
80+
@Test
81+
@Timeout(value = 30)
8182
public void testNNBenchCreateControlFilesWithPool() throws Exception {
8283
runNNBench(createJobConf(), "create_write", BASE_DIR, "5");
8384
Path path = new Path(BASE_DIR, CONTROL_DIR_NAME);
@@ -86,7 +87,8 @@ public void testNNBenchCreateControlFilesWithPool() throws Exception {
8687
assertEquals(5, fileStatuses.length);
8788
}
8889

89-
@Test(timeout = 30000)
90+
@Test
91+
@Timeout(value = 30)
9092
public void testNNBenchCrossCluster() throws Exception {
9193
MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new JobConf())
9294
.numDataNodes(1).build();
@@ -96,8 +98,8 @@ public void testNNBenchCrossCluster() throws Exception {
9698
runNNBench(createJobConf(), "create_write", baseDir);
9799

98100
Path path = new Path(BASE_DIR + "/data/file_0_0");
99-
assertTrue("create_write should create the file",
100-
dfsCluster.getFileSystem().exists(path));
101+
assertTrue(dfsCluster.getFileSystem().exists(path),
102+
"create_write should create the file");
101103
dfsCluster.shutdown();
102104
}
103105

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/TestSequenceFileMergeProgress.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,8 @@
2828
import org.apache.hadoop.mapred.*;
2929

3030
import org.slf4j.Logger;
31-
import org.junit.Test;
32-
import static org.junit.Assert.assertEquals;
31+
import org.junit.jupiter.api.Test;
32+
import static org.junit.jupiter.api.Assertions.assertEquals;
3333

3434
public class TestSequenceFileMergeProgress {
3535
private static final Logger LOG = FileInputFormat.LOG;

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestMRCJCSocketFactory.java

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,10 @@
3434
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
3535
import org.apache.hadoop.net.StandardSocketFactory;
3636
import org.apache.hadoop.yarn.conf.YarnConfiguration;
37-
import org.junit.Assert;
38-
import org.junit.Test;
37+
import org.junit.jupiter.api.Test;
38+
39+
import static org.junit.jupiter.api.Assertions.assertFalse;
40+
import static org.junit.jupiter.api.Assertions.assertTrue;
3941

4042
/**
4143
* This class checks that RPCs can use specialized socket factories.
@@ -56,13 +58,13 @@ public void testSocketFactory() throws IOException {
5658

5759
// Get a reference to its DFS directly
5860
FileSystem fs = cluster.getFileSystem();
59-
Assert.assertTrue(fs instanceof DistributedFileSystem);
61+
assertTrue(fs instanceof DistributedFileSystem);
6062
DistributedFileSystem directDfs = (DistributedFileSystem) fs;
6163

6264
Configuration cconf = getCustomSocketConfigs(nameNodePort);
6365

6466
fs = FileSystem.get(cconf);
65-
Assert.assertTrue(fs instanceof DistributedFileSystem);
67+
assertTrue(fs instanceof DistributedFileSystem);
6668
DistributedFileSystem dfs = (DistributedFileSystem) fs;
6769

6870
JobClient client = null;
@@ -72,12 +74,12 @@ public void testSocketFactory() throws IOException {
7274
// could we test Client-DataNode connections?
7375
Path filePath = new Path("/dir");
7476

75-
Assert.assertFalse(directDfs.exists(filePath));
76-
Assert.assertFalse(dfs.exists(filePath));
77+
assertFalse(directDfs.exists(filePath));
78+
assertFalse(dfs.exists(filePath));
7779

7880
directDfs.mkdirs(filePath);
79-
Assert.assertTrue(directDfs.exists(filePath));
80-
Assert.assertTrue(dfs.exists(filePath));
81+
assertTrue(directDfs.exists(filePath));
82+
assertTrue(dfs.exists(filePath));
8183

8284
// This will test RPC to a Resource Manager
8385
fs = FileSystem.get(sconf);
@@ -95,7 +97,7 @@ public void testSocketFactory() throws IOException {
9597
client = new JobClient(jconf);
9698

9799
JobStatus[] jobs = client.jobsToComplete();
98-
Assert.assertTrue(jobs.length == 0);
100+
assertTrue(jobs.length == 0);
99101

100102
} finally {
101103
closeClient(client);

0 commit comments

Comments (0)