
Rapid development environment: commit Hadoop test cases.

lilb3 committed 6 years ago
commit d8c7ea8d30

+ 8 - 0
ipu-hadoop-example/aaa.txt

@@ -0,0 +1,8 @@
111
222 333
444 555 666
777 888
999
0000000000

+ 57 - 0
ipu-hadoop-example/pom.xml

@@ -0,0 +1,57 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<parent>
		<groupId>com.ai.ipu</groupId>
		<artifactId>ipu-aggregator</artifactId>
		<version>3.1-SNAPSHOT</version>
	</parent>

	<groupId>com.ai.ipu.example</groupId>
	<artifactId>ipu-hadoop-example</artifactId>
	<version>1.0</version>
	<packaging>jar</packaging>

	<name>ipu-hadoop-example</name>
	<url>http://maven.apache.org</url>

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<ipu>3.1-SNAPSHOT</ipu>
		<jdk>1.8</jdk>
		<junit>4.12</junit>
		<hadoop-client>2.7.3</hadoop-client>
	</properties>

	<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>${junit}</version>
		</dependency>

		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-client</artifactId>
			<version>${hadoop-client}</version>
		</dependency>

		<dependency>
			<groupId>com.ai.ipu</groupId>
			<artifactId>ipu-basic</artifactId>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-surefire-plugin</artifactId>
				<configuration>
					<skip>true</skip>
				</configuration>
			</plugin>
		</plugins>
	</build>
</project>

+ 67 - 0
ipu-hadoop-example/src/test/java/com/ai/ipu/example/hadoop/HadoopConfig.java

@@ -0,0 +1,67 @@
package com.ai.ipu.example.hadoop;

import com.ai.ipu.basic.file.ResourceBundleUtil;
import com.ai.ipu.basic.log.ILogger;
import com.ai.ipu.basic.log.IpuLoggerFactory;

/**
 * Loads the Hadoop test settings from the hadoop.properties resource bundle.
 *
 * @Author: lilb3@asiainfo.com
 * @Date: 2019-06-18 17:05
 **/
public class HadoopConfig {
    private static final ILogger LOGGER = IpuLoggerFactory.createLogger(HadoopConfig.class);
    private static final String CONFIG_FILE_PATH = "hadoop";
    /* URI of the Hadoop cluster to connect to, e.g. hdfs://ip:port */
    private static String hdfsPath;
    /* Whether connecting to Hadoop by host name is supported; true: supported, false: not supported */
    private static String dfsClientUseDatanodeHostname;
    /* Name of the directory created by the tests */
    private static String dirName;
    /* Name of the file created by the tests; it is placed under /<dir.name>/ */
    private static String fileName;
    /* String written to the test file */
    private static String writeString;
    /* Name of the file uploaded to HDFS; a matching local file must exist */
    private static String uploadLocalFile;
    /* Name of the file the download test writes locally */
    private static String downloadLocalFile;

    public static String getHdfsPath() {
        return hdfsPath;
    }

    public static String getDfsClientUseDatanodeHostname() {
        return dfsClientUseDatanodeHostname;
    }

    public static String getDirName() {
        return dirName;
    }

    public static String getFileName() {
        return fileName;
    }

    public static String getWriteString() {
        return writeString;
    }

    public static String getUploadLocalFile() {
        return uploadLocalFile;
    }

    public static String getDownloadLocalFile() {
        return downloadLocalFile;
    }

    /* Load the configuration file */
    static {
        try {
            ResourceBundleUtil.initialize(CONFIG_FILE_PATH, HadoopConfig.class);
        } catch (Exception e) {
            LOGGER.error("Failed to read configuration file " + CONFIG_FILE_PATH, e);
        }
    }
}
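Note: ResourceBundleUtil comes from the in-house ipu-basic dependency and is not part of this commit, so its exact contract is an assumption here. Judging from the keys in hadoop.properties below, it appears to read the "hadoop" bundle from the classpath and copy each dotted key into the matching camelCase static field of the target class. A rough, hypothetical JDK-only sketch of that assumed behavior, for illustration only:

```java
// Hypothetical sketch of what ResourceBundleUtil.initialize("hadoop", HadoopConfig.class)
// is assumed to do; the real ipu-basic implementation is not shown in this commit.
import java.lang.reflect.Field;
import java.util.ResourceBundle;

public class ManualConfigLoader {
    public static void load(String bundleName, Class<?> target) throws Exception {
        // reads <bundleName>.properties (e.g. hadoop.properties) from the classpath
        ResourceBundle bundle = ResourceBundle.getBundle(bundleName);
        for (String key : bundle.keySet()) {
            // map a dotted key such as "hdfs.path" to a camelCase field name such as "hdfsPath"
            String[] parts = key.split("\\.");
            StringBuilder fieldName = new StringBuilder(parts[0]);
            for (int i = 1; i < parts.length; i++) {
                fieldName.append(Character.toUpperCase(parts[i].charAt(0))).append(parts[i].substring(1));
            }
            Field field = target.getDeclaredField(fieldName.toString());
            field.setAccessible(true);
            field.set(null, bundle.getString(key)); // static String fields only in this sketch
        }
    }
}
```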

+ 108 - 0
ipu-hadoop-example/src/test/java/com/ai/ipu/example/hadoop/HadoopExample.java

@@ -0,0 +1,108 @@
package com.ai.ipu.example.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.net.URI;

/**
 * HDFS test cases: create a directory, create and write a file, upload and
 * download files, list the file tree, and locate file blocks.
 *
 * @Author: lilb3@asiainfo.com
 * @Date: 2019-05-24 17:10
 **/
public class HadoopExample {
    private FileSystem hdfs = null;
    private static final String HDFS_SPLIT_STRING = "/";


    @Before
    public void setBefore() throws Exception {
        Configuration configuration = new Configuration();
        configuration.set("dfs.client.use.datanode.hostname", HadoopConfig.getDfsClientUseDatanodeHostname());
        String hdfsPath = HadoopConfig.getHdfsPath();
        hdfs = FileSystem.get(new URI(hdfsPath), configuration);
    }

    @After
    public void setAfter() throws Exception {
        if (hdfs != null) {
            hdfs.close();
        }
    }

    @Test
    public void mkdir() throws Exception {
        String newDir = HDFS_SPLIT_STRING + HadoopConfig.getDirName();
        boolean result = hdfs.mkdirs(new Path(newDir));
        if (result) {
            System.out.println("Success!");
        } else {
            System.out.println("Failed!");
        }
    }

    @Test
    public void createFile() throws Exception {
        String filePath = HDFS_SPLIT_STRING + HadoopConfig.getDirName() + HDFS_SPLIT_STRING + HadoopConfig.getFileName();
        FSDataOutputStream create = hdfs.create(new Path(filePath));
        create.writeBytes(HadoopConfig.getWriteString());
        create.close();
        System.out.println("Finish!");
    }

    @Test
    public void copyFromLocalFile() throws Exception {
        String localFile = HadoopConfig.getUploadLocalFile();
        String toHDFS = HDFS_SPLIT_STRING + HadoopConfig.getDirName() + HDFS_SPLIT_STRING;
        hdfs.copyFromLocalFile(new Path(localFile), new Path(toHDFS));
        System.out.println("Finish!");
    }

    @Test
    public void copyToLocalFile() throws Exception {
        String hdfsFile = HDFS_SPLIT_STRING + HadoopConfig.getDirName() + HDFS_SPLIT_STRING + HadoopConfig.getFileName();
        String localFile = HadoopConfig.getDownloadLocalFile();
        hdfs.copyToLocalFile(false, new Path(hdfsFile), new Path(localFile));
        System.out.println("Finish!");
    }

    @Test
    public void listFile() throws Exception {
        iteratorListFile(hdfs, new Path(HDFS_SPLIT_STRING));
    }

    @Test
    public void locateFile() throws Exception {
        Path file = new Path(HDFS_SPLIT_STRING + HadoopConfig.getDirName() + HDFS_SPLIT_STRING + HadoopConfig.getFileName());
        FileStatus fileStatus = hdfs.getFileStatus(file);
        BlockLocation[] location = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
        for (BlockLocation block : location) {
            String[] hosts = block.getHosts();
            for (String host : hosts) {
                System.out.println("block:" + block + " host:" + host);
            }
        }
    }

    private static void iteratorListFile(FileSystem hdfs, Path path) throws Exception {
        FileStatus[] files = hdfs.listStatus(path);
        for (FileStatus file : files) {
            if (file.isDirectory()) {
                System.out.println(file.getPermission() + " " + file.getOwner()
                        + " " + file.getGroup() + " " + file.getPath());
                iteratorListFile(hdfs, file.getPath());
            } else if (file.isFile()) {
                System.out.println(file.getPermission() + " " + file.getOwner()
                        + " " + file.getGroup() + " " + file.getPath());
            }
        }
    }
}
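The commit covers directory creation, writes, uploads, downloads, listing, and block location, but includes no test that reads a file back or cleans up the test directory. Below is a minimal sketch of both, using only stock hadoop-client 2.7.x API (FileSystem.open, IOUtils.copyBytes, FileSystem.delete); the readFile and deleteDir test names are hypothetical and not part of this commit:

```java
// Hypothetical additions to HadoopExample; not part of this commit.
@Test
public void readFile() throws Exception {
    String filePath = HDFS_SPLIT_STRING + HadoopConfig.getDirName() + HDFS_SPLIT_STRING + HadoopConfig.getFileName();
    // FileSystem.open returns an FSDataInputStream positioned at the start of the file
    org.apache.hadoop.fs.FSDataInputStream in = hdfs.open(new Path(filePath));
    try {
        // copy the file content to stdout; 4096-byte buffer, do not close System.out
        org.apache.hadoop.io.IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
        in.close();
    }
}

@Test
public void deleteDir() throws Exception {
    // recursive delete of the whole test directory, e.g. /hdfstest
    boolean result = hdfs.delete(new Path(HDFS_SPLIT_STRING + HadoopConfig.getDirName()), true);
    System.out.println(result ? "Success!" : "Failed!");
}
```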

+ 14 - 0
ipu-hadoop-example/src/test/resources/hadoop.properties

@@ -0,0 +1,14 @@
# URI of the Hadoop cluster to connect to, e.g. hdfs://ip:port
hdfs.path=hdfs://iZm5e5xe1w25avi0io1f5aZ:9000
# Whether connecting to Hadoop by host name is supported; true: supported (should be set to true here), false: not supported
dfs.client.use.datanode.hostname=true
# Name of the directory created by the tests
dir.name=hdfstest
# Name of the file created by the tests; it is placed under /<dir.name>/
file.name=touchfile
# String written to the test file
write.string=abcdefghijklmnopqrstuvwxyz1234567890
# Name of the file uploaded to HDFS; a matching local file must exist
upload.local.file=aaa.txt
# Name of the file the download test writes locally
download.local.file=bbb.txt

+ 8 - 0
ipu-hadoop-example/src/test/resources/log4j.properties

@@ -0,0 +1,8 @@
log4j.rootLogger=error, console
log4j.logger.org.apache.zookeeper=error
log4j.logger.com.ai.ipu.example=debug

log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.out
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %5p [%t] (%F:%L) - %m%n