HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part5. by zhtttylz · Pull Request #7733 · apache/hadoop · GitHub

HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part5. #7733

Merged
merged 2 commits on Jun 15, 2025
Changes from all commits
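
The recurring change in this PR is the move from org.junit.Assert to org.junit.jupiter.api.Assertions, which places the failure message last instead of first. A minimal standalone sketch of the pattern (the class and values below are illustrative only, not taken from the diff):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

class AssertionOrderSketch {
  void messageMovesToLastArgument(long expectedLen, long actualLen) {
    // JUnit 4: assertEquals("unexpected length", expectedLen, actualLen);
    assertEquals(expectedLen, actualLen, "unexpected length");
    // JUnit 4: assertTrue("length should be non-negative", actualLen >= 0);
    assertTrue(actualLen >= 0, "length should be non-negative");
  }
}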
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.hdfs;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.io.OutputStream;
@@ -97,9 +97,9 @@ public static int[] randomFilePartition(int n, int parts) {
}

LOG.info("partition=" + Arrays.toString(p));
assertTrue("i=0", p[0] > 0 && p[0] < n);
assertTrue(p[0] > 0 && p[0] < n, "i=0");
for(int i = 1; i < p.length; i++) {
assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
assertTrue(p[i] > p[i - 1] && p[i] < n, "i=" + i);
}
return p;
}
@@ -217,8 +217,7 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
boolean checkFileStatus) throws IOException {
if (checkFileStatus) {
final FileStatus status = fs.getFileStatus(name);
assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
len, status.getLen());
assertEquals(len, status.getLen(), "len=" + len + " but status.getLen()=" + status.getLen());
}

FSDataInputStream stm = fs.open(name);
@@ -231,9 +230,9 @@
private static void checkData(final byte[] actual, int from,
final byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
expected[from+idx], actual[idx]);
assertEquals(expected[from + idx], actual[idx],
message + " byte " + (from + idx) + " differs. expected " +
expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
@@ -26,9 +26,9 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

/** This is a comprehensive append test that tries
* all combinations of file length and number of appended bytes
@@ -59,15 +59,15 @@ private static void init(Configuration conf) {
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
}

@BeforeClass
@BeforeAll
public static void startUp () throws IOException {
conf = new HdfsConfiguration();
init(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
fs = cluster.getFileSystem();
}

@AfterClass
@AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.shutdown();
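
Besides the assertions, the file above swaps the class-level lifecycle annotations: JUnit 4's @BeforeClass/@AfterClass become Jupiter's @BeforeAll/@AfterAll, and later in this diff the method-level @Before/@After become @BeforeEach/@AfterEach. A hypothetical skeleton of the mapping, not taken from the PR:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleSketch {
  @BeforeAll   // was @BeforeClass; still runs once per class and must be static
  static void startUp() { /* e.g. build a shared MiniDFSCluster here */ }

  @AfterAll    // was @AfterClass
  static void tearDown() { /* shut the shared cluster down */ }

  @BeforeEach  // was @Before; runs before every test method
  void setUp() { }

  @AfterEach   // was @After
  void cleanUp() { }

  @Test
  void example() { }
}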
@@ -30,7 +30,6 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -40,6 +39,9 @@
import java.util.ArrayList;
import java.util.Collection;

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
* Utility class for testing online recovery of striped files.
*/
@@ -216,11 +218,11 @@ public static void testReadWithBlockCorrupted(MiniDFSCluster cluster,
+ ", parityBlkDelNum = " + parityBlkDelNum
+ ", deleteBlockFile? " + deleteBlockFile);
int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
Assert.assertTrue("dataBlkDelNum and parityBlkDelNum should be positive",
dataBlkDelNum >= 0 && parityBlkDelNum >= 0);
Assert.assertTrue("The sum of dataBlkDelNum and parityBlkDelNum " +
"should be between 1 ~ " + NUM_PARITY_UNITS, recoverBlkNum <=
NUM_PARITY_UNITS);
assertTrue(dataBlkDelNum >= 0 && parityBlkDelNum >= 0,
"dataBlkDelNum and parityBlkDelNum should be positive");
assertTrue(recoverBlkNum <=
NUM_PARITY_UNITS, "The sum of dataBlkDelNum and parityBlkDelNum " +
"should be between 1 ~ " + NUM_PARITY_UNITS);

// write a file with the length of writeLen
Path srcPath = new Path(src);
@@ -248,10 +250,10 @@ public static void corruptBlocks(MiniDFSCluster cluster,

int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, NUM_DATA_UNITS,
dataBlkDelNum);
Assert.assertNotNull(delDataBlkIndices);
assertNotNull(delDataBlkIndices);
int[] delParityBlkIndices = StripedFileTestUtil.randomArray(NUM_DATA_UNITS,
NUM_DATA_UNITS + NUM_PARITY_UNITS, parityBlkDelNum);
Assert.assertNotNull(delParityBlkIndices);
assertNotNull(delParityBlkIndices);

int[] delBlkIndices = new int[recoverBlkNum];
System.arraycopy(delDataBlkIndices, 0,
@@ -37,7 +37,6 @@
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -55,7 +54,11 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import static org.junit.Assert.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

public class StripedFileTestUtil {
public static final Logger LOG =
@@ -77,7 +80,7 @@ static byte getByte(long pos) {
static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
throws IOException {
FileStatus status = fs.getFileStatus(srcPath);
assertEquals("File length should be the same", fileLength, status.getLen());
assertEquals(fileLength, status.getLen(), "File length should be the same");
}

static void verifyPread(DistributedFileSystem fs, Path srcPath,
@@ -109,9 +112,8 @@ static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
offset += target;
}
for (int i = 0; i < fileLength - startOffset; i++) {
assertEquals("Byte at " + (startOffset + i) + " is different, "
+ "the startOffset is " + startOffset, expected[startOffset + i],
result[i]);
assertEquals(expected[startOffset + i], result[i], "Byte at " + (startOffset + i) +
" is different, " + "the startOffset is " + startOffset);
}
}
}
@@ -127,8 +129,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
System.arraycopy(buf, 0, result, readLen, ret);
readLen += ret;
}
assertEquals("The length of file should be the same to write size", fileLength, readLen);
Assert.assertArrayEquals(expected, result);
assertEquals(fileLength, readLen, "The length of file should be the same to write size");
assertArrayEquals(expected, result);
}
}

@@ -144,8 +146,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
result.put(buf);
buf.clear();
}
assertEquals("The length of file should be the same to write size", fileLength, readLen);
Assert.assertArrayEquals(expected, result.array());
assertEquals(fileLength, readLen, "The length of file should be the same to write size");
assertArrayEquals(expected, result.array());
}
}

@@ -185,14 +187,14 @@ static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
try {
in.seek(-1);
Assert.fail("Should be failed if seek to negative offset");
fail("Should be failed if seek to negative offset");
} catch (EOFException e) {
// expected
}

try {
in.seek(fileLength + 1);
Assert.fail("Should be failed if seek after EOF");
fail("Should be failed if seek after EOF");
} catch (EOFException e) {
// expected
}
@@ -206,8 +208,8 @@ static void assertSeekAndRead(FSDataInputStream fsdis, int pos,
byte[] buf = new byte[writeBytes - pos];
IOUtils.readFully(fsdis, buf, 0, buf.length);
for (int i = 0; i < buf.length; i++) {
assertEquals("Byte at " + i + " should be the same",
StripedFileTestUtil.getByte(pos + i), buf[i]);
assertEquals(StripedFileTestUtil.getByte(pos + i),
buf[i], "Byte at " + i + " should be the same");
}
}

@@ -225,7 +227,7 @@ static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
final DatanodeInfo[] datanodes = streamer.getNodes();
if (datanodes != null) {
assertEquals(1, datanodes.length);
Assert.assertNotNull(datanodes[0]);
assertNotNull(datanodes[0]);
return datanodes[0];
}
try {
@@ -377,13 +379,13 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
final int parityBlkNum = ecPolicy.getNumParityUnits();
int index = 0;
for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
assertTrue(firstBlock instanceof LocatedStripedBlock);

final long gs = firstBlock.getBlock().getGenerationStamp();
final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
final String s = "gs=" + gs + ", oldGS=" + oldGS;
LOG.info(s);
Assert.assertTrue(s, gs >= oldGS);
assertTrue(gs >= oldGS, s);

LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
(LocatedStripedBlock) firstBlock, cellSize,
@@ -456,7 +458,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
Assert.assertTrue(posInFile < length);
assertTrue(posInFile < length);
final byte expected = getByte(posInFile);

if (killed) {
@@ -466,7 +468,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
String s = "expected=" + expected + " but actual=" + actual[posInBlk]
+ ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
+ ". group=" + group + ", i=" + i;
Assert.fail(s);
fail(s);
}
}
}
@@ -507,12 +509,12 @@ static void verifyParityBlocks(Configuration conf, final long size,
try {
encoder.encode(dataBytes, expectedParityBytes);
} catch (IOException e) {
Assert.fail("Unexpected IOException: " + e.getMessage());
fail("Unexpected IOException: " + e.getMessage());
}
for (int i = 0; i < parityBytes.length; i++) {
if (checkSet.contains(i + dataBytes.length)){
Assert.assertArrayEquals("i=" + i, expectedParityBytes[i],
parityBytes[i]);
assertArrayEquals(expectedParityBytes[i],
parityBytes[i], "i=" + i);
}
}
}
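
The seek checks in the file above keep the JUnit 4 style try/fail/catch around the expected EOFException; a hypothetical alternative (not part of this change) would be Jupiter's assertThrows, sketched here with an already opened stream and known file length as in verifySeek:

import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.EOFException;
import org.apache.hadoop.fs.FSDataInputStream;

class SeekFailureSketch {
  static void verifySeekFailures(FSDataInputStream in, int fileLength) {
    // Seeking to a negative offset is expected to raise EOFException.
    assertThrows(EOFException.class, () -> in.seek(-1),
        "Should be failed if seek to negative offset");
    // Seeking past the end of the file is expected to raise EOFException as well.
    assertThrows(EOFException.class, () -> in.seek(fileLength + 1),
        "Should be failed if seek after EOF");
  }
}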
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs;

import static org.junit.Assert.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;

import java.io.IOException;

@@ -30,10 +31,9 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

/**
* Test abandoning blocks, which clients do on pipeline creation failure.
@@ -48,14 +48,14 @@ public class TestAbandonBlock {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;

@Before
@BeforeEach
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
fs = cluster.getFileSystem();
cluster.waitActive();
}

@After
@AfterEach
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
@@ -100,8 +100,8 @@ public void testAbandonBlock() throws IOException {
cluster.restartNameNode();
blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
Integer.MAX_VALUE);
Assert.assertEquals("Blocks " + b + " has not been abandoned.",
orginalNumBlocks, blocks.locatedBlockCount() + 1);
assertEquals(orginalNumBlocks, blocks.locatedBlockCount() + 1, "Blocks " +
b + " has not been abandoned.");
}

@Test