
common-hbase: common module commit

chenweida 7 years ago
commit 75ae4fd7ca

+ 47 - 37
common/common-hbase/pom.xml

@@ -15,42 +15,52 @@
    <dependencies>
        <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-client</artifactId>
        <version>${version.hbase}</version>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
            <exclusion>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
            </exclusion>
            <exclusion>
                <groupId>javax.servlet</groupId>
                <artifactId>servlet-api</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>${version.hadoop}</version>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
            <exclusion>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
            </exclusion>
            <exclusion>
                <groupId>javax.servlet</groupId>
                <artifactId>servlet-api</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-common</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-protocol</artifactId>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-beans</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-context</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.data</groupId>
            <artifactId>spring-data-hadoop-hbase</artifactId>
        </dependency>
    </dependencies>
</project>

+ 37 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/AbstractHBaseClient.java

@@ -0,0 +1,37 @@
package com.yihu.ehr.hbase;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.hadoop.hbase.HbaseTemplate;
import java.io.IOException;
/**
 * @author hzp
 * @created 2017.05.03
 */
public class AbstractHBaseClient {
    @Autowired
    protected HbaseTemplate hbaseTemplate;
    /**
     * Create a connection using the injected HbaseTemplate's configuration
     */
    protected Connection getConnection() throws Exception {
        return getConnection(hbaseTemplate);
    }
    /**
     * Create a connection from the given HbaseTemplate
     */
    protected Connection getConnection(HbaseTemplate hbaseTemplate) throws Exception {
        Connection connection = ConnectionFactory.createConnection(hbaseTemplate.getConfiguration());
        return connection;
    }
}

+ 155 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/HBaseAdmin.java

@@ -0,0 +1,155 @@
package com.yihu.ehr.hbase;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.yihu.ehr.hbase.AbstractHBaseClient;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.data.hadoop.hbase.TableCallback;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * @author hzp
 * @created 2017.05.03
 */
@Service
public class HBaseAdmin extends AbstractHBaseClient {
    /**
     * Check whether a table exists
     */
    public boolean isTableExists(String tableName) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        boolean ex = admin.tableExists(TableName.valueOf(tableName));
        admin.close();
        connection.close();
        return ex;
    }
    /**
     * Create a table with the given column families
     */
    public void createTable(String tableName, String... columnFamilies) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        if (!admin.tableExists(TableName.valueOf(tableName))) {
            HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
            for (String fc : columnFamilies) {
                tableDescriptor.addFamily(new HColumnDescriptor(fc));
            }
            admin.createTable(tableDescriptor);
        }
        admin.close();
        connection.close();
    }
    /**
     * List table names matching a regex
     */
    public List<String> getTableList(String regex, boolean includeSysTables) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        TableName[] tableNames;
        if (regex == null || regex.length() == 0) {
            tableNames = admin.listTableNames();
        } else {
            tableNames = admin.listTableNames(regex, includeSysTables);
        }
        List<String> tables = new ArrayList<>();
        for (TableName tableName : tableNames) {
            tables.add(tableName.getNameAsString());
        }
        admin.close();
        connection.close();
        return tables;
    }
    /**
     * Truncate tables in batch (disable, delete and recreate with the same descriptor)
     */
    public void truncate(List<String> tables) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            for (String tableName : tables) {
                TableName tn = TableName.valueOf(tableName);
                if (admin.tableExists(TableName.valueOf(tableName))) {
                    HTableDescriptor descriptor = admin.getTableDescriptor(tn);
                    admin.disableTable(tn);
                    admin.deleteTable(tn);
                    admin.createTable(descriptor);
                }
                else{
                    System.out.print("not exit table "+tableName+".\r\n");
                }
                /*else{
                    HTableDescriptor descriptor = new HTableDescriptor(tableName);
                    descriptor.addFamily(new HColumnDescriptor("basic"));
                    descriptor.addFamily(new HColumnDescriptor("d"));
                    admin.createTable(descriptor);
                }*/
            }
        } finally {
            admin.close();
            connection.close();
        }
    }
    /**
     * Drop a table (disable, then delete)
     */
    public void dropTable(String tableName) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            admin.disableTable(TableName.valueOf(tableName));
            admin.deleteTable(TableName.valueOf(tableName));
        } finally {
            admin.close();
            connection.close();
        }
    }
    public ObjectNode getTableMetaData(String tableName) {
        return hbaseTemplate.execute(tableName, new TableCallback<ObjectNode>() {
            public ObjectNode doInTable(HTableInterface table) throws Throwable {
                ObjectMapper objectMapper = new ObjectMapper();
                ObjectNode root = objectMapper.createObjectNode();
                HTableDescriptor tableDescriptor = table.getTableDescriptor();
                HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
                for (int i = 0; i < columnDescriptors.length; ++i) {
                    HColumnDescriptor columnDescriptor = columnDescriptors[i];
                    root.put(Integer.toString(i), Bytes.toString(columnDescriptor.getName()));
                }
                return root;
            }
        });
    }
}
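
A minimal usage sketch of the HBaseAdmin bean above (the caller class, table name and column families are hypothetical, not part of this commit); note that createTable is a no-op when the table already exists:

package com.yihu.ehr.hbase;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
/**
 * Hypothetical caller, shown only to illustrate the HBaseAdmin API above.
 */
@Service
public class HBaseAdminUsageSketch {
    @Autowired
    private HBaseAdmin hbaseAdmin;
    public List<String> initTable() throws Exception {
        // create the table with two column families; skipped if it already exists
        hbaseAdmin.createTable("HealthProfile", "basic", "d");
        // list user tables whose names match the regex, excluding system tables
        return hbaseAdmin.getTableList("Health.*", false);
    }
}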

+ 346 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/HBaseDao.java

@@ -0,0 +1,346 @@
package com.yihu.ehr.hbase;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.hadoop.hbase.RowMapper;
import org.springframework.data.hadoop.hbase.TableCallback;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.*;
/**
 * HBase data CRUD (create, read, update, delete) operations
 */
@Service
public class HBaseDao extends AbstractHBaseClient {
    @Autowired
    ObjectMapper objectMapper;
    /**
     * Find row keys matching a regular expression
     */
    public String[] findRowKeys(String tableName, String rowkeyRegEx) throws Exception {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("basic"));
        scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(rowkeyRegEx)));
        List<String> list = new LinkedList<>();
        hbaseTemplate.find(tableName, scan, new RowMapper<Void>() {
            @Override
            public Void mapRow(Result result, int rowNum) throws Exception {
                list.add(Bytes.toString(result.getRow()));
                return null;
            }
        });
        return list.toArray(new String[list.size()]);
    }
    /**
     * Total number of rows in a table
     */
    public Integer count(String tableName) throws Exception {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("basic"));
        scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("^")));
        List<String> list = new LinkedList<>();
        hbaseTemplate.find(tableName, scan, new RowMapper<Void>() {
            @Override
            public Void mapRow(Result result, int rowNum) throws Exception {
                list.add(Bytes.toString(result.getRow()));
                return null;
            }
        });
        return list.size();
    }
    /**
     * Get one record by rowkey, returned as a JSON string
     */
    public String get(String tableName, String rowkey) {
        return hbaseTemplate.get(tableName, rowkey,new RowMapper<String>() {
            public String mapRow(Result result, int rowNum) throws Exception {
                if(!result.isEmpty())
                {
                    List<Cell> ceList = result.listCells();
                    Map<String, Object> map = new HashMap<String, Object>();
                    map.put("rowkey",rowkey);
                    if (ceList != null && ceList.size() > 0) {
                        for (Cell cell : ceList) {
                            //the column family is not prefixed by default
                            // Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) +"_"
                            map.put(Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
                                    Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
                        }
                    }
                    return objectMapper.writeValueAsString(map);
                }
                else{
                    return "";
                }
            }
        });
    }
    /**
     * Get a single value by table name, rowkey, column family and qualifier
     */
    public String get(String tableName ,String rowkey, String familyName, String qualifier) {
        return hbaseTemplate.get(tableName, rowkey,familyName,qualifier ,new RowMapper<String>(){
            public String mapRow(Result result, int rowNum) throws Exception {
                List<Cell> ceList =   result.listCells();
                String res = "";
                if(ceList!=null&&ceList.size()>0){
                    for(Cell cell:ceList){
                        res = Bytes.toString( cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                    }
                }
                return res;
            }
        });
    }
    /**
     * Get a row (Result) by rowkey
     */
    public Result getResult(String tableName, String rowKey) throws Exception {
        return hbaseTemplate.get(tableName, rowKey, new RowMapper<Result>() {
            public Result mapRow(Result result, int rowNum) throws Exception {
                return result;
            }
        });
    }
    /**
     * Get multiple rows (Results) by a list of rowkeys
     */
    public Result[] getResultList(String tableName,List<String> rowKeys) throws Exception {
        return hbaseTemplate.execute(tableName, new TableCallback<Result[]>() {
            public Result[] doInTable(HTableInterface table) throws Throwable {
                List<Get> list = new ArrayList<Get>();
                for (String rowKey : rowKeys) {
                    Get get = new Get(Bytes.toBytes(rowKey));
                    list.add(get);
                }
                return  table.get(list);
            }
        });
    }
    /**
     * Get a row as a Map by rowkey
     */
    public Map<String, Object> getResultMap(String tableName, String rowKey) throws Exception {
        return hbaseTemplate.get(tableName, rowKey, new RowMapper<Map<String, Object>>() {
            public Map<String, Object> mapRow(Result result, int rowNum) throws Exception {
                Map<String, Object> map = null;
                if(result!=null) {
                    List<Cell> ceList = result.listCells();
                    if (ceList != null && ceList.size() > 0) {
                        map = new HashMap<String, Object>();
                        map.put("rowkey", rowKey);
                        for (Cell cell : ceList) {
                            //the column family is not prefixed by default
                            // Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) +"_"
                            map.put(Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
                                    Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
                        }
                    }
                }
                return map;
            }
        });
    }
    /**
     * Update the value of one column in a row
     */
    public void put(String tableName ,String rowkey, String familyName, String qualifier,String value) throws Exception
    {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Put p = new Put(rowkey.getBytes());
                p.add(familyName.getBytes(), qualifier.getBytes(), value.getBytes());
                table.put(p);
                return null;
            }
        });
    }
    /**
     * Insert a row (map of family -> qualifier -> value)
     */
    public void add(String tableName , String rowkey, Map<String,Map<String,String>> family) throws Exception
    {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Put p = new Put(rowkey.getBytes());
                for(String familyName : family.keySet())
                {
                    Map<String,String> map = family.get(familyName);
                    for (String qualifier : map.keySet())
                    {
                        String value = map.get(qualifier);
                        p.add(familyName.getBytes(), qualifier.getBytes(), value.getBytes());
                    }
                }
                table.put(p);
                return null;
            }
        });
    }
    /**
     * Insert data (parallel arrays of columns and values)
     */
    public void add(String tableName, String rowKey, String family, Object[] columns, Object[] values) throws Exception {
        hbaseTemplate.execute(tableName, new TableCallback<Object>() {
            public Object doInTable(HTableInterface htable) throws Throwable {
                Put put = new Put(Bytes.toBytes(rowKey));
                for (int j = 0; j < columns.length; j++) {
                    //columns with null values are not saved
                    if(values[j]!=null)
                    {
                        String column = String.valueOf(columns[j]);
                        String value = String.valueOf(values[j]);
                        put.addColumn(Bytes.toBytes(family),
                                Bytes.toBytes(column),
                                Bytes.toBytes(value));
                    }
                }
                htable.put(put);
                return null;
            }
        });
    }
    /**
     * Delete one record by rowkey
     */
    public void delete(String tableName, String rowkey)  {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Delete d = new Delete(rowkey.getBytes());
                table.delete(d);
                return null;
            }
        });
    }
    /**
     * Batch delete rows by rowkeys
     */
    public Object[] deleteBatch(String tableName, String[] rowKeys) throws Exception {
        return hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Delete> deletes = new ArrayList<>(rowKeys.length);
                for (String rowKey : rowKeys) {
                    Delete delete = new Delete(Bytes.toBytes(rowKey));
                    deletes.add(delete);
                }
                Object[] results = new Object[deletes.size()];
                table.batch(deletes, results);
                return results;
            }
        });
    }
    /**
     * Delete a column family of a row
     */
    public void deleteFamily(String tableName, String rowKey, String familyName) throws Exception {
        hbaseTemplate.delete(tableName, rowKey, familyName);
    }
    /**
     * Delete a single column of a row
     */
    public void deleteColumn(String tableName, String rowKey, String familyName, String columnName) throws Exception {
        hbaseTemplate.delete(tableName, rowKey, familyName, columnName);
    }
    /************************************* Operations based on the TableBundle bean (prototype pattern) ***************************************************************/
    /**
     * Save data (prototype pattern, via TableBundle)
     */
    public void save(String tableName, TableBundle tableBundle) throws Exception {
        hbaseTemplate.execute(tableName, new TableCallback<Object>() {
            public Object doInTable(HTableInterface htable) throws Throwable {
                List<Put> puts = tableBundle.putOperations();
                Object[] results = new Object[puts.size()];
                htable.batch(puts, results);
                return null;
            }
        });
    }
    /**
     * Query data (prototype pattern, via TableBundle)
     */
    public Object[] get(String tableName, TableBundle tableBundle) {
        return hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Get> gets = tableBundle.getOperations();
                Object[] results = new Object[gets.size()];
                table.batch(gets, results);
                if (results.length > 0 && results[0].toString().equals("keyvalues=NONE")) return null;
                return results;
            }
        });
    }
    /**
     * Delete data (prototype pattern, via TableBundle)
     */
    public void delete(String tableName, TableBundle tableBundle) {
        hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Delete> deletes = tableBundle.deleteOperations();
                Object[] results = new Object[deletes.size()];
                table.batch(deletes, results);
                return null;
            }
        });
    }
}
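
A minimal usage sketch of the HBaseDao bean above (the caller class, table, rowkey and column names are hypothetical): write one row with the array-based add and read it back as JSON keyed by qualifier.

package com.yihu.ehr.hbase;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
 * Hypothetical caller, shown only to illustrate the HBaseDao API above.
 */
@Service
public class HBaseDaoUsageSketch {
    @Autowired
    private HBaseDao hbaseDao;
    public String writeAndRead() throws Exception {
        // write two columns of the "basic" family for one row; null values are skipped
        hbaseDao.add("HealthProfile", "rowkey-001", "basic",
                new Object[]{"name", "idCard"}, new Object[]{"demo", null});
        // read the whole row back as a JSON string keyed by column qualifier
        return hbaseDao.get("HealthProfile", "rowkey-001");
    }
}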

+ 166 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/TableBundle.java

@@ -0,0 +1,166 @@
package com.yihu.ehr.hbase;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Bundles HBase rows, column families and columns together and generates the required
 * Get/Put/Delete operations in a single pass.
 * <p>
 * Only single-table operations are supported.
 * <p>
 * Although several HBase operations are supported, an instance must be used for only one
 * kind of operation at a time: Get, Put and Delete must not be mixed, otherwise the
 * results are unpredictable.
 *
 * @author Sand
 * @created 2016.04.27 14:38
 */
public class TableBundle {
    Map<String, Row> rows = new HashMap<>();
    public void addRows(String... rowkeys) {
        for (String rowkey : rowkeys) {
            rows.put(rowkey, null);
        }
    }
    public void addFamily(String rowkey, Object family) {
        Row row = getRow(rowkey);
        row.addFamily(family.toString());
    }
    public void addColumns(String rowkey, Object family, String[] columns) {
        Row row = getRow(rowkey);
        row.addColumns(family.toString(), columns);
    }
    public void addValues(String rowkey, Object family, Map<String, String> values) {
        Row row = getRow(rowkey);
        row.addValues(family.toString(), values);
    }
    public void clear() {
        rows.clear();
    }
    public List<Get> getOperations() {
        List<Get> gets = new ArrayList<>(rows.size());
        for (String rowkey : rows.keySet()) {
            Get get = new Get(Bytes.toBytes(rowkey));
            Row row = rows.get(rowkey);
            if (row != null) {
                for (String family : row.getFamilies()) {
                    Set<Object> columns = row.getCells(family);
                    if (CollectionUtils.isEmpty(columns)) {
                        get.addFamily(Bytes.toBytes(family));
                    }
                    for (Object column : columns) {
                        get.addColumn(Bytes.toBytes(family), Bytes.toBytes((String) column));
                    }
                }
            }
            gets.add(get);
        }
        return gets;
    }
    public List<Put> putOperations() {
        List<Put> puts = new ArrayList<>(rows.values().size());
        for (String rowkey : rows.keySet()) {
            Put put = new Put(Bytes.toBytes(rowkey));
            Row row = rows.get(rowkey);
            for (String family : row.getFamilies()) {
                Set<Object> columns = row.getCells(family);
                for (Object column : columns) {
                    Pair<String, String> pair = (Pair<String, String>) column;
                    if (StringUtils.isNotEmpty(pair.getRight())) {
                        put.addColumn(Bytes.toBytes(family),
                                Bytes.toBytes(pair.getLeft()),
                                Bytes.toBytes(pair.getRight()));
                    }
                }
            }
            puts.add(put);
        }
        return puts;
    }
    public List<Delete> deleteOperations() {
        List<Delete> deletes = new ArrayList<>(rows.values().size());
        for (String rowkey : rows.keySet()) {
            Delete delete = new Delete(Bytes.toBytes(rowkey));
            deletes.add(delete);
        }
        return deletes;
    }
    private Row getRow(String rowkey) {
        Row row = rows.get(rowkey);
        if (row == null) {
            row = new Row();
            rows.put(rowkey, row);
        }
        return row;
    }
    /**
     * A single row in HBase
     */
    public static class Row {
        private Map<String, Set<Object>> cells = new HashMap<>();   // key: column family, value: set of columns
        public void addFamily(String family) {
            cells.put(family, null);
        }
        public void addColumns(String family, String... columns) {
            Set value = getFamily(family);
            for (String column : columns) {
                value.add(column);
            }
        }
        public void addValues(String family, Map<String, String> values) {
            Set value = getFamily(family);
            value.addAll(values.keySet().stream().map(key -> new ImmutablePair<>(key, values.get(key))).collect(Collectors.toList()));
        }
        public Set<String> getFamilies() {
            return cells.keySet();
        }
        public Set<Object> getCells(String family) {
            return cells.get(family);
        }
        private Set<Object> getFamily(String family) {
            Set value = cells.get(family);
            if (value == null) {
                value = new TreeSet<>();
                cells.put(family, value);
            }
            return value;
        }
    }
}
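
A minimal sketch of TableBundle used together with HBaseDao.save (names and values are hypothetical); per the Javadoc above, the bundle is used here for Put operations only.

package com.yihu.ehr.hbase;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.Map;
/**
 * Hypothetical caller, shown only to illustrate the TableBundle API above.
 */
@Service
public class TableBundleUsageSketch {
    @Autowired
    private HBaseDao hbaseDao;
    public void saveOneRow() throws Exception {
        TableBundle bundle = new TableBundle();
        Map<String, String> values = new HashMap<>();
        values.put("name", "demo");
        // bundle one row with qualifier/value pairs for the "basic" family
        bundle.addValues("rowkey-001", "basic", values);
        // batch the generated Put operations against the table
        hbaseDao.save("HealthProfile", bundle);
    }
}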

+ 88 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/config/HbaseConfig.java

@@ -0,0 +1,88 @@
package com.yihu.ehr.hbase.config;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.security.UserGroupInformation;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.hadoop.hbase.HbaseTemplate;
import org.springframework.data.hadoop.hbase.TableCallback;
import java.util.*;
/**
 * @author Sand
 * @version 1.0
 * @created 2015.11.28 16:26
 */
@Configuration
@ConfigurationProperties(prefix = "hadoop")
public class HbaseConfig{
    private Map<String, String> hbaseProperties = new HashMap<>();
    public Map<String, String> getHbaseProperties(){
        return this.hbaseProperties;
    }
    @Value("${hadoop.user.name}")
    private String user;
    @Bean
    public org.apache.hadoop.conf.Configuration configuration() {
        Set<String> keys = new HashSet<>(hbaseProperties.keySet());
        for (String key : keys){
            String value = hbaseProperties.remove(key);
            key = key.replaceAll("^\\d{1,2}\\.", "");
            hbaseProperties.put(key, value);
        }
        org.apache.hadoop.conf.Configuration configuration = HBaseConfiguration.create();
        hbaseProperties.keySet().stream().filter(key -> hbaseProperties.get(key) != null).forEach(key -> {
            configuration.set(key, hbaseProperties.get(key));
        });
        return configuration;
    }
    @Bean
    public HbaseTemplate hbaseTemplate(org.apache.hadoop.conf.Configuration configuration){
        HbaseTemplate hbaseTemplate = new HbaseTemplate();
        hbaseTemplate.setConfiguration(configuration);
        try
        {
            System.setProperty("HADOOP_USER_NAME", user);
            String tableName = "HealthProfile";
            Connection connection = ConnectionFactory.createConnection(configuration);
            Admin admin = connection.getAdmin();
            boolean ex = admin.tableExists(TableName.valueOf(tableName));
            //check whether the table exists before issuing the warm-up Get
            if(ex)
            {
                hbaseTemplate.execute(tableName, new TableCallback<Object>() {
                    @Override
                    public Object doInTable(HTableInterface table) throws Throwable {
                        Get get = new Get(Bytes.toBytes("connection-init"));
                        Result result = table.get(get);
                        return result;
                    }
                });
            }
            admin.close();
            connection.close();
        }
        catch (Exception ex)
        {
            ex.printStackTrace();
        }
        return hbaseTemplate;
    }
}
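
For reference, configuration() copies the property map bound under the hadoop prefix into an HBase Configuration after stripping a leading numeric ordering prefix from each key; a minimal sketch of that normalization, using a hypothetical key:

public class HbaseKeyNormalizationSketch {
    public static void main(String[] args) {
        // hypothetical bound key; the "1." ordering prefix is dropped, the rest is kept
        String boundKey = "1.hbase.zookeeper.quorum";
        String hbaseKey = boundKey.replaceAll("^\\d{1,2}\\.", "");
        System.out.println(hbaseKey); // prints hbase.zookeeper.quorum
    }
}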

+ 163 - 0
common/common-hbase/src/main/resources/hbase/core-site.xml

@@ -0,0 +1,163 @@
  <configuration>
    
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://dev</value>
    </property>
    
    <property>
      <name>fs.trash.interval</name>
      <value>360</value>
    </property>
    
    <property>
      <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
      <value>120</value>
    </property>
    
    <property>
      <name>ha.zookeeper.quorum</name>
      <value>node1.hde.h3c.com:2181,node2.hde.h3c.com:2181,node3.hde.h3c.com:2181</value>
    </property>
    
    <property>
      <name>hadoop.http.authentication.simple.anonymous.allowed</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hbase.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hbase.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hcat.groups</name>
      <value>users</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hcat.hosts</name>
      <value>node2.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hdfs.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hdfs.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hive.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hive.hosts</name>
      <value>node2.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.HTTP.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.HTTP.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hue.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hue.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.oozie.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.oozie.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.security.auth_to_local</name>
      <value>DEFAULT</value>
    </property>
    
    <property>
      <name>hadoop.security.authentication</name>
      <value>simple</value>
    </property>
    
    <property>
      <name>hadoop.security.authorization</name>
      <value>false</value>
    </property>
    
    <property>
      <name>hadoop.security.key.provider.path</name>
      <value></value>
    </property>
    
    <property>
      <name>io.compression.codecs</name>
      <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    
    <property>
      <name>io.file.buffer.size</name>
      <value>131072</value>
    </property>
    
    <property>
      <name>io.serializations</name>
      <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
    </property>
    
    <property>
      <name>ipc.client.connect.max.retries</name>
      <value>50</value>
    </property>
    
    <property>
      <name>ipc.client.connection.maxidletime</name>
      <value>30000</value>
    </property>
    
    <property>
      <name>ipc.client.idlethreshold</name>
      <value>8000</value>
    </property>
    
    <property>
      <name>ipc.server.tcpnodelay</name>
      <value>true</value>
    </property>
    
    <property>
      <name>mapreduce.jobtracker.webinterface.trusted</name>
      <value>false</value>
    </property>
    
    <property>
      <name>net.topology.script.file.name</name>
      <value>/etc/hadoop/conf/topology_script.py</value>
    </property>
    
  </configuration>

+ 243 - 0
common/common-hbase/src/main/resources/hbase/hbase-site.xml

@@ -0,0 +1,243 @@
  <configuration>
    
    <property>
      <name>dfs.domain.socket.path</name>
      <value>/var/lib/hadoop-hdfs/dn_socket</value>
    </property>
    
    <property>
      <name>hbase.bulkload.staging.dir</name>
      <value>/apps/hbase/staging</value>
    </property>
    
    <property>
      <name>hbase.client.keyvalue.maxsize</name>
      <value>1048576</value>
    </property>
    
    <property>
      <name>hbase.client.retries.number</name>
      <value>35</value>
    </property>
    
    <property>
      <name>hbase.client.scanner.caching</name>
      <value>100</value>
    </property>
    
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.master.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.region.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.regionserver.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    
    <property>
      <name>hbase.defaults.for.version.skip</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.hregion.majorcompaction</name>
      <value>604800000</value>
    </property>
    
    <property>
      <name>hbase.hregion.majorcompaction.jitter</name>
      <value>0.50</value>
    </property>
    
    <property>
      <name>hbase.hregion.max.filesize</name>
      <value>10737418240</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.block.multiplier</name>
      <value>4</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.flush.size</name>
      <value>134217728</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.mslab.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.hstore.blockingStoreFiles</name>
      <value>10</value>
    </property>
    
    <property>
      <name>hbase.hstore.compaction.max</name>
      <value>10</value>
    </property>
    
    <property>
      <name>hbase.hstore.compactionThreshold</name>
      <value>3</value>
    </property>
    
    <property>
      <name>hbase.local.dir</name>
      <value>${hbase.tmp.dir}/local</value>
    </property>
    
    <property>
      <name>hbase.master.info.bindAddress</name>
      <value>0.0.0.0</value>
    </property>
    
    <property>
      <name>hbase.master.info.port</name>
      <value>16010</value>
    </property>
    
    <property>
      <name>hbase.master.port</name>
      <value>16000</value>
    </property>
    
    <property>
      <name>hbase.regionserver.global.memstore.size</name>
      <value>0.4</value>
    </property>
    
    <property>
      <name>hbase.regionserver.handler.count</name>
      <value>30</value>
    </property>
    
    <property>
      <name>hbase.regionserver.info.port</name>
      <value>16030</value>
    </property>
    
    <property>
      <name>hbase.regionserver.port</name>
      <value>16020</value>
    </property>
    
    <property>
      <name>hbase.regionserver.thrift.http</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.regionserver.wal.codec</name>
      <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value>
    </property>
    
    <property>
      <name>hbase.replication</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://dev/apps/hbase/data</value>
    </property>
    
    <property>
      <name>hbase.rpc.protection</name>
      <value>authentication</value>
    </property>
    
    <property>
      <name>hbase.rpc.timeout</name>
      <value>90000</value>
    </property>
    
    <property>
      <name>hbase.security.authentication</name>
      <value>simple</value>
    </property>
    
    <property>
      <name>hbase.security.authorization</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.superuser</name>
      <value>hbase</value>
    </property>
    
    <property>
      <name>hbase.thrift.support.proxyuser</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.tmp.dir</name>
      <value>/hadoop/hbase</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>node1.hde.h3c.com,node2.hde.h3c.com,node3.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.useMulti</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hfile.block.cache.size</name>
      <value>0.40</value>
    </property>
    
    <property>
      <name>phoenix.query.timeoutMs</name>
      <value>60000</value>
    </property>
    
    <property>
      <name>replication.replicationsource.implementation</name>
      <value>com.ngdata.sep.impl.SepReplicationSource</value>
    </property>
    
    <property>
      <name>replication.source.nb.capacity</name>
      <value>1000</value>
    </property>
    
    <property>
      <name>replication.source.ratio</name>
      <value>1</value>
    </property>
    
    <property>
      <name>zookeeper.session.timeout</name>
      <value>90000</value>
    </property>
    
    <property>
      <name>zookeeper.znode.parent</name>
      <value>/hbase-unsecure</value>
    </property>
    
  </configuration>

+ 348 - 0
common/common-hbase/src/main/resources/hbase/hdfs-site.xml

@@ -0,0 +1,348 @@
  <configuration>
    
    <property>
      <name>dfs.block.access.token.enable</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.blockreport.initialDelay</name>
      <value>120</value>
    </property>
    
    <property>
      <name>dfs.blocksize</name>
      <value>134217728</value>
    </property>
    
    <property>
      <name>dfs.client.failover.proxy.provider.dev</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    
    <property>
      <name>dfs.client.read.shortcircuit</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.client.read.shortcircuit.streams.cache.size</name>
      <value>4096</value>
    </property>
    
    <property>
      <name>dfs.client.retry.policy.enabled</name>
      <value>false</value>
    </property>
    
    <property>
      <name>dfs.cluster.administrators</name>
      <value> hdfs</value>
    </property>
    
    <property>
      <name>dfs.content-summary.limit</name>
      <value>5000</value>
    </property>
    
    <property>
      <name>dfs.datanode.address</name>
      <value>0.0.0.0:50010</value>
    </property>
    
    <property>
      <name>dfs.datanode.balance.bandwidthPerSec</name>
      <value>6250000</value>
    </property>
    
    <property>
      <name>dfs.datanode.data.dir</name>
      <value>/opt/hadoop/hdfs/data</value>
    </property>
    
    <property>
      <name>dfs.datanode.data.dir.perm</name>
      <value>750</value>
    </property>
    
    <property>
      <name>dfs.datanode.du.reserved</name>
      <value>1073741824</value>
    </property>
    
    <property>
      <name>dfs.datanode.failed.volumes.tolerated</name>
      <value>0</value>
    </property>
    
    <property>
      <name>dfs.datanode.http.address</name>
      <value>0.0.0.0:50075</value>
    </property>
    
    <property>
      <name>dfs.datanode.https.address</name>
      <value>0.0.0.0:50475</value>
    </property>
    
    <property>
      <name>dfs.datanode.ipc.address</name>
      <value>0.0.0.0:8010</value>
    </property>
    
    <property>
      <name>dfs.datanode.max.transfer.threads</name>
      <value>4096</value>
    </property>
    
    <property>
      <name>dfs.domain.socket.path</name>
      <value>/var/lib/hadoop-hdfs/dn_socket</value>
    </property>
    
    <property>
      <name>dfs.encrypt.data.transfer.cipher.suites</name>
      <value>AES/CTR/NoPadding</value>
    </property>
    
    <property>
      <name>dfs.encryption.key.provider.uri</name>
      <value></value>
    </property>
    
    <property>
      <name>dfs.ha.automatic-failover.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>shell(/bin/true)</value>
    </property>
    
    <property>
      <name>dfs.ha.namenodes.dev</name>
      <value>nn1,nn2</value>
    </property>
    
    <property>
      <name>dfs.heartbeat.interval</name>
      <value>3</value>
    </property>
    
    <property>
      <name>dfs.hosts.exclude</name>
      <value>/etc/hadoop/conf/dfs.exclude</value>
    </property>
    
    <property>
      <name>dfs.http.policy</name>
      <value>HTTP_ONLY</value>
    </property>
    
    <property>
      <name>dfs.https.port</name>
      <value>50470</value>
    </property>
    
    <property>
      <name>dfs.journalnode.edits.dir</name>
      <value>/hadoop/hdfs/journal</value>
    </property>
    
    <property>
      <name>dfs.journalnode.http-address</name>
      <value>0.0.0.0:8480</value>
    </property>
    
    <property>
      <name>dfs.journalnode.https-address</name>
      <value>0.0.0.0:8481</value>
    </property>
    
    <property>
      <name>dfs.namenode.accesstime.precision</name>
      <value>0</value>
    </property>
    
    <property>
      <name>dfs.namenode.audit.log.async</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.avoid.read.stale.datanode</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.avoid.write.stale.datanode</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.dir</name>
      <value>/opt/hadoop/hdfs/namesecondary</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.edits.dir</name>
      <value>${dfs.namenode.checkpoint.dir}</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.period</name>
      <value>21600</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.txns</name>
      <value>1000000</value>
    </property>
    
    <property>
      <name>dfs.namenode.fslock.fair</name>
      <value>false</value>
    </property>
    
    <property>
      <name>dfs.namenode.handler.count</name>
      <value>100</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address</name>
      <value>node1.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address.dev.nn1</name>
      <value>node1.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address.dev.nn2</name>
      <value>node2.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address</name>
      <value>node1.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address.dev.nn1</name>
      <value>node1.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address.dev.nn2</name>
      <value>node2.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.name.dir</name>
      <value>/opt/hadoop/hdfs/namenode</value>
    </property>
    
    <property>
      <name>dfs.namenode.name.dir.restore</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address</name>
      <value>node1.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address.dev.nn1</name>
      <value>node1.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address.dev.nn2</name>
      <value>node2.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.safemode.threshold-pct</name>
      <value>0.99</value>
    </property>
    
    <property>
      <name>dfs.namenode.secondary.http-address</name>
      <value>localhost:50090</value>
    </property>
    
    <property>
      <name>dfs.namenode.shared.edits.dir</name>
      <value>qjournal://node1.hde.h3c.com:8485;node2.hde.h3c.com:8485;node3.hde.h3c.com:8485/dev</value>
    </property>
    
    <property>
      <name>dfs.namenode.stale.datanode.interval</name>
      <value>30000</value>
    </property>
    
    <property>
      <name>dfs.namenode.startup.delay.block.deletion.sec</name>
      <value>3600</value>
    </property>
    
    <property>
      <name>dfs.namenode.write.stale.datanode.ratio</name>
      <value>1.0f</value>
    </property>
    
    <property>
      <name>dfs.nameservices</name>
      <value>dev</value>
    </property>
    
    <property>
      <name>dfs.permissions.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.permissions.superusergroup</name>
      <value>hdfs</value>
    </property>
    
    <property>
      <name>dfs.replication</name>
      <value>3</value>
    </property>
    
    <property>
      <name>dfs.replication.max</name>
      <value>50</value>
    </property>
    
    <property>
      <name>dfs.support.append</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>fs.permissions.umask-mode</name>
      <value>022</value>
    </property>
    
    <property>
      <name>nfs.exports.allowed.hosts</name>
      <value>* rw</value>
    </property>
    
    <property>
      <name>nfs.file.dump.dir</name>
      <value>/tmp/.hdfs-nfs</value>
    </property>
    
  </configuration>

+ 0 - 23
common/common-mongodb/pom.xml

@@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.yihu.jw</groupId>
        <artifactId>common-lib-parent-pom</artifactId>
        <version>1.0.0</version>
        <relativePath>../../common-lib-parent-pom/pom.xml</relativePath>
    </parent>
    <artifactId>common-mongodb</artifactId>
    <version>1.0.0</version>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-mongodb</artifactId>
        </dependency>
    </dependencies>
</project>

+ 43 - 1
svr-lib-parent-pom/pom.xml

@@ -71,7 +71,7 @@
        <verion.fastJson>1.2.17</verion.fastJson>
        <version.net-json>2.4</version.net-json>
        <version.springside>4.2.3-GA</version.springside>
        <version.common.lang3>3.1</version.common.lang3>
        <version.common.lang3>3.2.1</version.common.lang3>
        <version.elasticsearch>2.4.4</version.elasticsearch>
        <version.elasticsearch-sql>2.4.1.0</version.elasticsearch-sql>
@@ -81,6 +81,8 @@
        <version.scala>2.10.6</version.scala>
        <version.elasticsearch>2.4.4</version.elasticsearch>
        <version.jest>2.4.0</version.jest>
        <version.hbase-client>1.1.1</version.hbase-client>
        <version.spring-data-hadoop>2.3.0.RELEASE</version.spring-data-hadoop>
    </properties>
    <!-- dependencyManagement: child modules that omit a version inherit the version declared here -->
    <dependencyManagement>
@@ -331,6 +333,11 @@
                <artifactId>spring-context</artifactId>
                <version>${version.spring}</version>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
                <artifactId>spring-beans</artifactId>
                <version>${version.spring}</version>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
@@ -589,6 +596,41 @@
                <version>${verion.fastJson}</version>
                <!--JSON library-->
            </dependency>
            <!-- hbase start-->
            <dependency>
                <groupId>org.springframework.data</groupId>
                <artifactId>spring-data-hadoop-hbase</artifactId>
                <version>${version.spring-data-hadoop}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-protocol</artifactId>
                <version>${version.hbase-client}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-client</artifactId>
                <version>${version.hbase-client}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-common</artifactId>
                <version>${version.hbase-client}</version>
                <exclusions>
                    <exclusion>
                        <groupId>org.slf4j</groupId>
                        <artifactId>slf4j-log4j12</artifactId>
                    </exclusion>
                </exclusions>
            </dependency>
            <!-- hbase end-->
        </dependencies>