Browse source

Merge branch 'dev' of chenweida/jw2.0 into dev

chenweida 7 years ago
parent
commit
038e88af9d

+ 16 - 0
common/common-es/pom.xml

@ -18,5 +18,21 @@
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
        </dependency>
        <dependency>
            <groupId>org.nlpcn</groupId>
            <artifactId>elasticsearch-sql</artifactId>
        </dependency>
        <dependency>
            <groupId>org.elasticsearch</groupId>
            <artifactId>elasticsearch</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
        </dependency>
        <dependency>
            <groupId>io.searchbox</groupId>
            <artifactId>jest</artifactId>
        </dependency>
    </dependencies>
</project>

+ 88 - 0
common/common-es/src/main/java/com/yihu/jw/es/config/ElasticFactory.java

@ -0,0 +1,88 @@
package com.yihu.jw.es.config;
import io.searchbox.client.JestClient;
import io.searchbox.client.JestClientFactory;
import io.searchbox.client.config.HttpClientConfig;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.concurrent.TimeUnit;
/**
 * Created by chenweida on 2017/6/5.
 */
@Component
public class ElasticFactory {
    private static JestClientFactory factory = null;
    @Value("${es.host}")
    private String esHost;
    @Value("${es.port}")
    private String port;
    @Value("${es.tPort}")
    private String tPort;
    @Value("${es.clusterName}")
    private String clusterName;
//-----------------------------------jestClient----------------------------------------
    /**
     * Returns a shared JestClient connected to "http://" + esHost + ":" + port
     * (e.g. "http://localhost:9200").
     */
    public JestClient getJestClient() {
        if (factory == null) {
            // initialize the client factory on first use
            init();
        }
        return factory.getObject();
    }
    /**
     * Initialize the client factory
     */
    public synchronized void init() {
        if (factory != null) {
            return;// already initialized by another thread
        }
        // Construct a new Jest client according to configuration via factory
        factory = new JestClientFactory();
        factory.setHttpClientConfig(new HttpClientConfig
                .Builder("http://" + esHost + ":" + port)
                .multiThreaded(true)
                .maxTotalConnection(50)// maximum number of connections
                .maxConnectionIdleTime(120, TimeUnit.SECONDS)// idle-connection timeout
                .connTimeout(60 * 1000)
               // .discoveryEnabled(true)
                .readTimeout(60 * 1000)// 60 seconds
                .build());// build the factory
    }
    //-----------------------------------TransportClient----------------------------------------
    private Client transportClient;
    public Client getTransportClient() {
        try {
            initTranClient();
            return transportClient;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
    private synchronized void initTranClient() throws UnknownHostException {
        if (transportClient == null) {
            Settings settings = Settings.settingsBuilder()
                   // .put("client.transport.sniff", true)// enable sniffing
                    .put("cluster.name", StringUtils.isEmpty(clusterName) ? "jkzl" : clusterName)// the cluster name defaults to jkzl
                    .build();
                    .build();
            transportClient = TransportClient.builder().settings(settings).build()
                    .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(esHost), Integer.valueOf(tPort)));
        }
    }
}
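
A minimal usage sketch (not part of this commit) of obtaining the shared JestClient from ElasticFactory and running a query with the Jest API; the EsQueryDemo class and the index/type names are assumptions for illustration:

// Hypothetical example, not in the commit: fetch the shared JestClient and run
// a match_all search. "demo_index" / "demo_type" are assumed names.
package com.yihu.jw.es.config;
import io.searchbox.client.JestClient;
import io.searchbox.core.Search;
import io.searchbox.core.SearchResult;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class EsQueryDemo {
    @Autowired
    private ElasticFactory elasticFactory;

    public String findAll() throws Exception {
        JestClient jestClient = elasticFactory.getJestClient();
        String query = "{\"query\":{\"match_all\":{}},\"size\":10}";
        Search search = new Search.Builder(query)
                .addIndex("demo_index")
                .addType("demo_type")
                .build();
        SearchResult result = jestClient.execute(search);
        return result.getJsonString();
    }
}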

+ 116 - 0
common/common-es/src/main/java/com/yihu/jw/es/config/ElastricSearchSave.java

@ -0,0 +1,116 @@
package com.yihu.jw.es.config;
import com.alibaba.fastjson.JSONObject;
import com.yihu.jw.es.config.model.SaveModel;
import io.searchbox.client.JestClient;
import io.searchbox.core.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import java.util.List;
/**
 * Created by chenweida on 2017/6/2.
 */
@Component
@Scope("prototype")
public class ElastricSearchSave {
    private Logger logger = LoggerFactory.getLogger(ElastricSearchSave.class);
    @Autowired
    private ElasticFactory elasticFactory;
    public Boolean save(String index, String type, List<SaveModel> sms) {
        try {
            // get the client
            JestClient jestClient = elasticFactory.getJestClient();
            int success = 0;
            int error = 0;
            Bulk.Builder bulk = new Bulk.Builder().defaultIndex(index).defaultType(type);
            for (SaveModel obj : sms) {
                try {
                    Index indexObj = new Index.Builder(obj).build();
                    bulk.addAction(indexObj);
                    success++;
                } catch (Exception e) {
                    logger.error(e.getMessage());
                    error++;
                }
            }
            BulkResult br = jestClient.execute(bulk.build());
            logger.info("save flag:" + br.isSucceeded());
            logger.info("save success:" + success);
            logger.info("save error:" + error);
            return br.isSucceeded();
        } catch (Exception e) {
            logger.error(" save error :" + e.getMessage());
        }
        return null;
    }
    public Boolean update(String index, String type, List<SaveModel> sms) {
        try {
            // get the client
            JestClient jestClient = elasticFactory.getJestClient();
            int success = 0;
            int error = 0;
            boolean isSuccessed = true;
            Bulk.Builder bulk = new Bulk.Builder().defaultIndex(index).defaultType(type);
            for (SaveModel obj : sms) {
                try {
                    JSONObject jo = new JSONObject();
                    jo.put("doc", obj);
                    Update indexObj = new Update.Builder(jo.toString()).index(index).type(type).id(obj.getId()).build();
                    bulk.addAction(indexObj);
                    success++;
                } catch (Exception e) {
                    error++;
                    isSuccessed = false;
                }
            }
            BulkResult br = jestClient.execute(bulk.build());
            logger.info("update flag:" + br.isSucceeded());
            logger.info("update success:" + success);
            logger.info("update error:" + error);
            return isSuccessed;
        } catch (Exception e) {
            logger.error(" update error :" + e.getMessage());
        }
        return null;
    }
    /**
     * Batch delete by id
     */
    private void deleteData(String index, String type, List<SaveModel> saveModels) {
        try {
            JestClient jestClient = elasticFactory.getJestClient();
            // batch delete by id
            Bulk.Builder bulk = new Bulk.Builder().defaultIndex(index).defaultType(type);
            for (SaveModel obj : saveModels) {
                Delete indexObj = new Delete.Builder(obj.getId()).build();
                bulk.addAction(indexObj);
            }
            BulkResult br = jestClient.execute(bulk.build());
            logger.info("delete data count:" + saveModels.size());
            logger.info("delete flag:" + br.isSucceeded());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
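
A hedged usage sketch (not part of this commit) of the bulk save; the LogModel subclass and the index/type names are assumptions:

// Hypothetical example, not in the commit: bulk-index two documents through
// ElastricSearchSave. LogModel and "demo_index"/"demo_type" are assumed names.
package com.yihu.jw.es.config;
import com.yihu.jw.es.config.model.SaveModel;
import java.util.ArrayList;
import java.util.List;

public class SaveDemo {
    // Minimal model; real subclasses of SaveModel would carry business fields.
    public static class LogModel extends SaveModel {
        public String message;
        public LogModel(String id, String message) {
            setId(id);
            this.message = message;
        }
    }

    public static void run(ElastricSearchSave elastricSearchSave) {
        List<SaveModel> models = new ArrayList<>();
        models.add(new LogModel("1", "first record"));
        models.add(new LogModel("2", "second record"));
        // returns the bulk result flag, or null if the request itself failed
        Boolean ok = elastricSearchSave.save("demo_index", "demo_type", models);
        System.out.println("bulk save succeeded: " + ok);
    }
}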

+ 21 - 0
common/common-es/src/main/java/com/yihu/jw/es/config/model/SaveModel.java

@ -0,0 +1,21 @@
package com.yihu.jw.es.config.model;
import io.searchbox.annotations.JestId;
/**
 * Common parent class for models saved to ES
 * Created by chenweida on 2017/11/3.
 */
public class SaveModel {
    @JestId
    private String id;
    public String getId() {
        return id;
    }
    public void setId(String id) {
        this.id = id;
    }
}

+ 5 - 0
common/common-es/src/main/resources/template.yml

@ -0,0 +1,5 @@
es:
  host: 59.61.92.90
  port: 9067 # HTTP port, default 9200
  tPort: 9068 # transport port, default 9300
  clusterName: jkzl

+ 47 - 37
common/common-hbase/pom.xml

@ -15,42 +15,52 @@
    <dependencies>
-        <dependency>
-            <groupId>org.apache.hbase</groupId>
-            <artifactId>hbase-client</artifactId>
-            <version>${version.hbase}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>javax.servlet</groupId>
-                    <artifactId>servlet-api</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-            <version>${version.hadoop}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>javax.servlet</groupId>
-                    <artifactId>servlet-api</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-client</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-common</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hbase</groupId>
+            <artifactId>hbase-protocol</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-beans</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.data</groupId>
+            <artifactId>spring-data-hadoop-hbase</artifactId>
+        </dependency>
    </dependencies>
</project>

+ 37 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/AbstractHBaseClient.java

@ -0,0 +1,37 @@
package com.yihu.ehr.hbase;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.hadoop.hbase.HbaseTemplate;
/**
 * @author hzp
 * @created 2017.05.03
 */
public class AbstractHBaseClient {
    @Autowired
    protected HbaseTemplate hbaseTemplate;
    /**
     * Create a connection
     */
    protected Connection getConnection() throws Exception {
        return getConnection(hbaseTemplate);
    }
    /**
     * Create a connection from the given template's configuration
     */
    protected Connection getConnection(HbaseTemplate hbaseTemplate) throws Exception {
        return ConnectionFactory.createConnection(hbaseTemplate.getConfiguration());
    }
}

+ 155 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/HBaseAdmin.java

@ -0,0 +1,155 @@
package com.yihu.ehr.hbase;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.data.hadoop.hbase.TableCallback;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
/**
 * @author hzp
 * @created 2017.05.03
 */
@Service
public class HBaseAdmin extends AbstractHBaseClient {
    /**
     * Check whether a table exists
     */
    public boolean isTableExists(String tableName) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            return admin.tableExists(TableName.valueOf(tableName));
        } finally {
            admin.close();
            connection.close();
        }
    }
    /**
     * Create a table
     */
    public void createTable(String tableName, String... columnFamilies) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            if (!admin.tableExists(TableName.valueOf(tableName))) {
                HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
                for (String fc : columnFamilies) {
                    tableDescriptor.addFamily(new HColumnDescriptor(fc));
                }
                admin.createTable(tableDescriptor);
            }
        } finally {
            admin.close();
            connection.close();
        }
    }
    /**
     * List table names matching a regex
     */
    public List<String> getTableList(String regex, boolean includeSysTables) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            TableName[] tableNames;
            if (regex == null || regex.length() == 0) {
                tableNames = admin.listTableNames();
            } else {
                tableNames = admin.listTableNames(regex, includeSysTables);
            }
            List<String> tables = new ArrayList<>();
            for (TableName tableName : tableNames) {
                tables.add(tableName.getNameAsString());
            }
            return tables;
        } finally {
            admin.close();
            connection.close();
        }
    }
    /**
     * Truncate tables in batch (drop and recreate each with the same descriptor)
     */
    public void truncate(List<String> tables) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            for (String tableName : tables) {
                TableName tn = TableName.valueOf(tableName);
                if (admin.tableExists(TableName.valueOf(tableName))) {
                    HTableDescriptor descriptor = admin.getTableDescriptor(tn);
                    admin.disableTable(tn);
                    admin.deleteTable(tn);
                    admin.createTable(descriptor);
                }
                else{
                    System.out.print("not exit table "+tableName+".\r\n");
                }
                /*else{
                    HTableDescriptor descriptor = new HTableDescriptor(tableName);
                    descriptor.addFamily(new HColumnDescriptor("basic"));
                    descriptor.addFamily(new HColumnDescriptor("d"));
                    admin.createTable(descriptor);
                }*/
            }
        } finally {
            admin.close();
            connection.close();
        }
    }
    /**
     * Drop a table
     */
    public void dropTable(String tableName) throws Exception {
        Connection connection = getConnection();
        Admin admin = connection.getAdmin();
        try {
            admin.disableTable(TableName.valueOf(tableName));
            admin.deleteTable(TableName.valueOf(tableName));
        } finally {
            admin.close();
            connection.close();
        }
    }
    public ObjectNode getTableMetaData(String tableName) {
        return hbaseTemplate.execute(tableName, new TableCallback<ObjectNode>() {
            public ObjectNode doInTable(HTableInterface table) throws Throwable {
                ObjectMapper objectMapper = new ObjectMapper();
                ObjectNode root = objectMapper.createObjectNode();
                HTableDescriptor tableDescriptor = table.getTableDescriptor();
                HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
                for (int i = 0; i < columnDescriptors.length; ++i) {
                    HColumnDescriptor columnDescriptor = columnDescriptors[i];
                    root.put(Integer.toString(i), Bytes.toString(columnDescriptor.getName()));
                }
                return root;
            }
        });
    }
}
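
A minimal usage sketch (not part of this commit), assuming a Spring-managed HBaseAdmin bean; the table and column-family names are made up:

// Hypothetical example, not in the commit: create a table with two column
// families and list matching tables. "demo_table" and the families are assumed.
package com.yihu.ehr.hbase;

public class HBaseAdminDemo {
    public static void run(HBaseAdmin hbaseAdmin) throws Exception {
        if (!hbaseAdmin.isTableExists("demo_table")) {
            hbaseAdmin.createTable("demo_table", "basic", "d");
        }
        // regex filter on table names; false excludes system tables
        for (String name : hbaseAdmin.getTableList("demo.*", false)) {
            System.out.println(name);
        }
    }
}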

+ 346 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/HBaseDao.java

@ -0,0 +1,346 @@
package com.yihu.ehr.hbase;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.hadoop.hbase.RowMapper;
import org.springframework.data.hadoop.hbase.TableCallback;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.*;
/**
 * CRUD operations for HBase data
 */
@Service
public class HBaseDao extends AbstractHBaseClient {
    @Autowired
    ObjectMapper objectMapper;
    /**
     * Find row keys matching a regex
     */
    public String[] findRowKeys(String tableName, String rowkeyRegEx) throws Exception {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("basic"));
        scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(rowkeyRegEx)));
        List<String> list = new LinkedList<>();
        hbaseTemplate.find(tableName, scan, new RowMapper<Void>() {
            @Override
            public Void mapRow(Result result, int rowNum) throws Exception {
                list.add(Bytes.toString(result.getRow()));
                return null;
            }
        });
        return list.toArray(new String[list.size()]);
    }
    /**
     * Total number of rows in a table (scans all row keys)
     */
    public Integer count(String tableName) throws Exception {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("basic"));
        scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("^")));
        List<String> list = new LinkedList<>();
        hbaseTemplate.find(tableName, scan, new RowMapper<Void>() {
            @Override
            public Void mapRow(Result result, int rowNum) throws Exception {
                list.add(Bytes.toString(result.getRow()));
                return null;
            }
        });
        return list.size();
    }
    /**
     * Get one record by rowkey, serialized as JSON
     */
    public String get(String tableName, String rowkey) {
        return hbaseTemplate.get(tableName, rowkey,new RowMapper<String>() {
            public String mapRow(Result result, int rowNum) throws Exception {
                if(!result.isEmpty())
                {
                    List<Cell> ceList = result.listCells();
                    Map<String, Object> map = new HashMap<String, Object>();
                    map.put("rowkey",rowkey);
                    if (ceList != null && ceList.size() > 0) {
                        for (Cell cell : ceList) {
                            // the column family prefix is omitted by default
                            // Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) +"_"
                            map.put(Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
                                    Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
                        }
                    }
                    return objectMapper.writeValueAsString(map);
                }
                else{
                    return "";
                }
            }
        });
    }
    /**
     * Get a single cell value by table name, rowkey, column family and qualifier
     */
    public String get(String tableName ,String rowkey, String familyName, String qualifier) {
        return hbaseTemplate.get(tableName, rowkey,familyName,qualifier ,new RowMapper<String>(){
            public String mapRow(Result result, int rowNum) throws Exception {
                List<Cell> ceList =   result.listCells();
                String res = "";
                if(ceList!=null&&ceList.size()>0){
                    for(Cell cell:ceList){
                        res = Bytes.toString( cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                    }
                }
                return res;
            }
        });
    }
    /**
     * Get a row Result by rowkey
     */
    public Result getResult(String tableName, String rowKey) throws Exception {
        return hbaseTemplate.get(tableName, rowKey, new RowMapper<Result>() {
            public Result mapRow(Result result, int rowNum) throws Exception {
                return result;
            }
        });
    }
    /**
     * Get multiple rows by rowkeys
     */
    public Result[] getResultList(String tableName,List<String> rowKeys) throws Exception {
        return hbaseTemplate.execute(tableName, new TableCallback<Result[]>() {
            public Result[] doInTable(HTableInterface table) throws Throwable {
                List<Get> list = new ArrayList<Get>();
                for (String rowKey : rowKeys) {
                    Get get = new Get(Bytes.toBytes(rowKey));
                    list.add(get);
                }
                return  table.get(list);
            }
        });
    }
    /**
     * Get a row as a Map by rowkey
     */
    public Map<String, Object> getResultMap(String tableName, String rowKey) throws Exception {
        return hbaseTemplate.get(tableName, rowKey, new RowMapper<Map<String, Object>>() {
            public Map<String, Object> mapRow(Result result, int rowNum) throws Exception {
                Map<String, Object> map = null;
                if(result!=null) {
                    List<Cell> ceList = result.listCells();
                    if (ceList != null && ceList.size() > 0) {
                        map = new HashMap<String, Object>();
                        map.put("rowkey", rowKey);
                        for (Cell cell : ceList) {
                            // the column family prefix is omitted by default
                            // Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) +"_"
                            map.put(Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
                                    Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
                        }
                    }
                }
                return map;
            }
        });
    }
    /**
     * Update the value of one column in a row
     */
    public void put(String tableName ,String rowkey, String familyName, String qualifier,String value) throws Exception
    {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Put p = new Put(rowkey.getBytes());
                p.add(familyName.getBytes(), qualifier.getBytes(), value.getBytes());
                table.put(p);
                return null;
            }
        });
    }
    /**
     * Insert a row (values grouped by column family)
     */
    public void add(String tableName , String rowkey, Map<String,Map<String,String>> family) throws Exception
    {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Put p = new Put(rowkey.getBytes());
                for(String familyName : family.keySet())
                {
                    Map<String,String> map = family.get(familyName);
                    for (String qualifier : map.keySet())
                    {
                        String value = map.get(qualifier);
                        p.add(familyName.getBytes(), qualifier.getBytes(), value.getBytes());
                    }
                }
                table.put(p);
                return null;
            }
        });
    }
    /**
     * Insert data (one family; parallel arrays of columns and values)
     */
    public void add(String tableName, String rowKey, String family, Object[] columns, Object[] values) throws Exception {
        hbaseTemplate.execute(tableName, new TableCallback<Object>() {
            public Object doInTable(HTableInterface htable) throws Throwable {
                Put put = new Put(Bytes.toBytes(rowKey));
                for (int j = 0; j < columns.length; j++) {
                    // null values are not saved
                    if(values[j]!=null)
                    {
                        String column = String.valueOf(columns[j]);
                        String value = String.valueOf(values[j]);
                        put.addColumn(Bytes.toBytes(family),
                                Bytes.toBytes(column),
                                Bytes.toBytes(value));
                    }
                }
                htable.put(put);
                return null;
            }
        });
    }
    /**
     * Delete one record by rowkey
     */
    public void delete(String tableName, String rowkey)  {
        hbaseTemplate.execute(tableName, new TableCallback<String>() {
            public String doInTable(HTableInterface table) throws Throwable {
                Delete d = new Delete(rowkey.getBytes());
                table.delete(d);
                return null;
            }
        });
    }
    /**
     * Batch delete rows
     */
    public Object[] deleteBatch(String tableName, String[] rowKeys) throws Exception {
        return hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Delete> deletes = new ArrayList<>(rowKeys.length);
                for (String rowKey : rowKeys) {
                    Delete delete = new Delete(Bytes.toBytes(rowKey));
                    deletes.add(delete);
                }
                Object[] results = new Object[deletes.size()];
                table.batch(deletes, results);
                return results;
            }
        });
    }
    /**
     * Delete a column family
     */
    public void deleteFamily(String tableName, String rowKey, String familyName) throws Exception {
        hbaseTemplate.delete(tableName, rowKey, familyName);
    }
    /**
     * Delete a column
     */
    public void deleteColumn(String tableName, String rowKey, String familyName, String columnName) throws Exception {
        hbaseTemplate.delete(tableName, rowKey, familyName, columnName);
    }
    /************************************* Beans use the prototype pattern ***************************************************************/
    /**
     * Save data (prototype pattern)
     */
    public void save(String tableName, TableBundle tableBundle) throws Exception {
        hbaseTemplate.execute(tableName, new TableCallback<Object>() {
            public Object doInTable(HTableInterface htable) throws Throwable {
                List<Put> puts = tableBundle.putOperations();
                Object[] results = new Object[puts.size()];
                htable.batch(puts, results);
                return null;
            }
        });
    }
    /**
     * Query data (prototype pattern)
     */
    public Object[] get(String tableName, TableBundle tableBundle) {
        return hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Get> gets = tableBundle.getOperations();
                Object[] results = new Object[gets.size()];
                table.batch(gets, results);
                if (results.length > 0 && results[0].toString().equals("keyvalues=NONE")) return null;
                return results;
            }
        });
    }
    /**
     * Delete data (prototype pattern)
     */
    public void delete(String tableName, TableBundle tableBundle) {
        hbaseTemplate.execute(tableName, new TableCallback<Object[]>() {
            public Object[] doInTable(HTableInterface table) throws Throwable {
                List<Delete> deletes = tableBundle.deleteOperations();
                Object[] results = new Object[deletes.size()];
                table.batch(deletes, results);
                return null;
            }
        });
    }
}
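
A minimal usage sketch (not part of this commit) of the basic write/read path; the table, rowkey, family and column names are assumptions:

// Hypothetical example, not in the commit: write one row and read it back as
// JSON via HBaseDao.
package com.yihu.ehr.hbase;

public class HBaseDaoDemo {
    public static void run(HBaseDao hbaseDao) throws Exception {
        // insert columns "name" and "age" into family "basic" of row "row-1"
        hbaseDao.add("demo_table", "row-1", "basic",
                new Object[]{"name", "age"}, new Object[]{"chen", "30"});
        // fetch the whole row; returns "" when the row does not exist
        String json = hbaseDao.get("demo_table", "row-1");
        System.out.println(json);
    }
}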

+ 166 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/TableBundle.java

@ -0,0 +1,166 @@
package com.yihu.ehr.hbase;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Bundles HBase rows, column families and columns together, and generates all the
 * required Get or Put operations in one pass.
 * <p>
 * Only single-table operations are supported.
 * <p>
 * Although several HBase operation types are supported, note that a bundle may only
 * be used for one operation type at a time: Get, Put and Delete must not be mixed,
 * otherwise the results are unpredictable.
 *
 * @author Sand
 * @created 2016.04.27 14:38
 */
public class TableBundle {
    Map<String, Row> rows = new HashMap<>();
    public void addRows(String... rowkeys) {
        for (String rowkey : rowkeys) {
            rows.put(rowkey, null);
        }
    }
    public void addFamily(String rowkey, Object family) {
        Row row = getRow(rowkey);
        row.addFamily(family.toString());
    }
    public void addColumns(String rowkey, Object family, String[] columns) {
        Row row = getRow(rowkey);
        row.addColumns(family.toString(), columns);
    }
    public void addValues(String rowkey, Object family, Map<String, String> values) {
        Row row = getRow(rowkey);
        row.addValues(family.toString(), values);
    }
    public void clear() {
        rows.clear();
    }
    public List<Get> getOperations() {
        List<Get> gets = new ArrayList<>(rows.size());
        for (String rowkey : rows.keySet()) {
            Get get = new Get(Bytes.toBytes(rowkey));
            Row row = rows.get(rowkey);
            if (row != null) {
                for (String family : row.getFamilies()) {
                    Set<Object> columns = row.getCells(family);
                    if (CollectionUtils.isEmpty(columns)) {
                        get.addFamily(Bytes.toBytes(family));
                    }
                    for (Object column : columns) {
                        get.addColumn(Bytes.toBytes(family), Bytes.toBytes((String) column));
                    }
                }
            }
            gets.add(get);
        }
        return gets;
    }
    public List<Put> putOperations() {
        List<Put> puts = new ArrayList<>(rows.values().size());
        for (String rowkey : rows.keySet()) {
            Put put = new Put(Bytes.toBytes(rowkey));
            Row row = rows.get(rowkey);
            for (String family : row.getFamilies()) {
                Set<Object> columns = row.getCells(family);
                for (Object column : columns) {
                    Pair<String, String> pair = (Pair<String, String>) column;
                    if (StringUtils.isNotEmpty(pair.getRight())) {
                        put.addColumn(Bytes.toBytes(family),
                                Bytes.toBytes(pair.getLeft()),
                                Bytes.toBytes(pair.getRight()));
                    }
                }
            }
            puts.add(put);
        }
        return puts;
    }
    public List<Delete> deleteOperations() {
        List<Delete> deletes = new ArrayList<>(rows.values().size());
        for (String rowkey : rows.keySet()) {
            Delete delete = new Delete(Bytes.toBytes(rowkey));
            deletes.add(delete);
        }
        return deletes;
    }
    private Row getRow(String rowkey) {
        Row row = rows.get(rowkey);
        if (row == null) {
            row = new Row();
            rows.put(rowkey, row);
        }
        return row;
    }
    /**
     * A single row in HBase
     */
    public static class Row {
        private Map<String, Set<Object>> cells = new HashMap<>();   // key: column family, value: columns
        public void addFamily(String family) {
            cells.put(family, null);
        }
        public void addColumns(String family, String... columns) {
            Set value = getFamily(family);
            for (String column : columns) {
                value.add(column);
            }
        }
        public void addValues(String family, Map<String, String> values) {
            Set value = getFamily(family);
            value.addAll(values.keySet().stream().map(key -> new ImmutablePair<>(key, values.get(key))).collect(Collectors.toList()));
        }
        public Set<String> getFamilies() {
            return cells.keySet();
        }
        public Set<Object> getCells(String family) {
            return cells.get(family);
        }
        private Set<Object> getFamily(String family) {
            Set value = cells.get(family);
            if (value == null) {
                value = new TreeSet<>();
                cells.put(family, value);
            }
            return value;
        }
    }
}
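
A minimal usage sketch (not part of this commit) combining TableBundle with HBaseDao.save; all table, row and family names are assumptions:

// Hypothetical example, not in the commit: batch two rows into a single set of
// Put operations via TableBundle.
package com.yihu.ehr.hbase;
import java.util.HashMap;
import java.util.Map;

public class TableBundleDemo {
    public static void run(HBaseDao hbaseDao) throws Exception {
        TableBundle bundle = new TableBundle();
        Map<String, String> values = new HashMap<>();
        values.put("name", "chen");
        values.put("age", "30");
        // one bundle, one operation type: Puts only (see the class javadoc)
        bundle.addValues("row-1", "basic", values);
        bundle.addValues("row-2", "basic", values);
        hbaseDao.save("demo_table", bundle);
    }
}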

+ 88 - 0
common/common-hbase/src/main/java/com/yihu/ehr/hbase/config/HbaseConfig.java

@ -0,0 +1,88 @@
package com.yihu.ehr.hbase.config;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.hadoop.hbase.HbaseTemplate;
import org.springframework.data.hadoop.hbase.TableCallback;
import java.util.*;
/**
 * @author Sand
 * @version 1.0
 * @created 2015.11.28 16:26
 */
@Configuration
@ConfigurationProperties(prefix = "hadoop")
public class HbaseConfig{
    private Map<String, String> hbaseProperties = new HashMap<>();
    public Map<String, String> getHbaseProperties(){
        return this.hbaseProperties;
    }
    @Value("${hadoop.user.name}")
    private String user;
    @Bean
    public org.apache.hadoop.conf.Configuration configuration() {
        Set<String> keys = new HashSet<>(hbaseProperties.keySet());
        for (String key : keys){
            String value = hbaseProperties.remove(key);
            key = key.replaceAll("^\\d{1,2}\\.", "");
            hbaseProperties.put(key, value);
        }
        org.apache.hadoop.conf.Configuration configuration = HBaseConfiguration.create();
        hbaseProperties.keySet().stream().filter(key -> hbaseProperties.get(key) != null).forEach(key -> {
            configuration.set(key, hbaseProperties.get(key));
        });
        return configuration;
    }
    @Bean
    public HbaseTemplate hbaseTemplate(org.apache.hadoop.conf.Configuration configuration){
        HbaseTemplate hbaseTemplate = new HbaseTemplate();
        hbaseTemplate.setConfiguration(configuration);
        try
        {
            System.setProperty("HADOOP_USER_NAME", user);
            String tableName = "HealthProfile";
            Connection connection = ConnectionFactory.createConnection(configuration);
            Admin admin = connection.getAdmin();
            boolean ex = admin.tableExists(TableName.valueOf(tableName));
            // if the table exists, prime the connection with a dummy Get
            if(ex)
            {
                hbaseTemplate.execute(tableName, new TableCallback<Object>() {
                    @Override
                    public Object doInTable(HTableInterface table) throws Throwable {
                        Get get = new Get(Bytes.toBytes("connection-init"));
                        Result result = table.get(get);
                        return result;
                    }
                });
            }
            admin.close();
            connection.close();
        }
        catch (Exception ex)
        {
            ex.printStackTrace();
        }
        return hbaseTemplate;
    }
}
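
A hedged configuration sketch (not part of this commit) of what the properties bound by this class might look like, assuming Spring Boot binds the map under "hadoop.hbaseProperties"; the host names are taken from the bundled hbase-site.xml, the rest is illustrative:

# Hypothetical template, not in the commit. configuration() strips the leading
# "NN." prefixes, so they only keep the YAML keys distinct and ordered.
hadoop:
  user:
    name: root  # bound to @Value("${hadoop.user.name}")
  hbaseProperties:
    1.hbase.zookeeper.quorum: node1.hde.h3c.com,node2.hde.h3c.com,node3.hde.h3c.com
    2.hbase.zookeeper.property.clientPort: 2181
    3.zookeeper.znode.parent: /hbase-unsecure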

+ 163 - 0
common/common-hbase/src/main/resources/hbase/core-site.xml

@ -0,0 +1,163 @@
  <configuration>
    
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://dev</value>
    </property>
    
    <property>
      <name>fs.trash.interval</name>
      <value>360</value>
    </property>
    
    <property>
      <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
      <value>120</value>
    </property>
    
    <property>
      <name>ha.zookeeper.quorum</name>
      <value>node1.hde.h3c.com:2181,node2.hde.h3c.com:2181,node3.hde.h3c.com:2181</value>
    </property>
    
    <property>
      <name>hadoop.http.authentication.simple.anonymous.allowed</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hbase.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hbase.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hcat.groups</name>
      <value>users</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hcat.hosts</name>
      <value>node2.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hdfs.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hdfs.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hive.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hive.hosts</name>
      <value>node2.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.HTTP.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.HTTP.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hue.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.hue.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.oozie.groups</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.proxyuser.oozie.hosts</name>
      <value>*</value>
    </property>
    
    <property>
      <name>hadoop.security.auth_to_local</name>
      <value>DEFAULT</value>
    </property>
    
    <property>
      <name>hadoop.security.authentication</name>
      <value>simple</value>
    </property>
    
    <property>
      <name>hadoop.security.authorization</name>
      <value>false</value>
    </property>
    
    <property>
      <name>hadoop.security.key.provider.path</name>
      <value></value>
    </property>
    
    <property>
      <name>io.compression.codecs</name>
      <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    
    <property>
      <name>io.file.buffer.size</name>
      <value>131072</value>
    </property>
    
    <property>
      <name>io.serializations</name>
      <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
    </property>
    
    <property>
      <name>ipc.client.connect.max.retries</name>
      <value>50</value>
    </property>
    
    <property>
      <name>ipc.client.connection.maxidletime</name>
      <value>30000</value>
    </property>
    
    <property>
      <name>ipc.client.idlethreshold</name>
      <value>8000</value>
    </property>
    
    <property>
      <name>ipc.server.tcpnodelay</name>
      <value>true</value>
    </property>
    
    <property>
      <name>mapreduce.jobtracker.webinterface.trusted</name>
      <value>false</value>
    </property>
    
    <property>
      <name>net.topology.script.file.name</name>
      <value>/etc/hadoop/conf/topology_script.py</value>
    </property>
    
  </configuration>

+ 243 - 0
common/common-hbase/src/main/resources/hbase/hbase-site.xml

@ -0,0 +1,243 @@
  <configuration>
    
    <property>
      <name>dfs.domain.socket.path</name>
      <value>/var/lib/hadoop-hdfs/dn_socket</value>
    </property>
    
    <property>
      <name>hbase.bulkload.staging.dir</name>
      <value>/apps/hbase/staging</value>
    </property>
    
    <property>
      <name>hbase.client.keyvalue.maxsize</name>
      <value>1048576</value>
    </property>
    
    <property>
      <name>hbase.client.retries.number</name>
      <value>35</value>
    </property>
    
    <property>
      <name>hbase.client.scanner.caching</name>
      <value>100</value>
    </property>
    
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.master.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.region.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
    </property>
    
    <property>
      <name>hbase.coprocessor.regionserver.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    
    <property>
      <name>hbase.defaults.for.version.skip</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.hregion.majorcompaction</name>
      <value>604800000</value>
    </property>
    
    <property>
      <name>hbase.hregion.majorcompaction.jitter</name>
      <value>0.50</value>
    </property>
    
    <property>
      <name>hbase.hregion.max.filesize</name>
      <value>10737418240</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.block.multiplier</name>
      <value>4</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.flush.size</name>
      <value>134217728</value>
    </property>
    
    <property>
      <name>hbase.hregion.memstore.mslab.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.hstore.blockingStoreFiles</name>
      <value>10</value>
    </property>
    
    <property>
      <name>hbase.hstore.compaction.max</name>
      <value>10</value>
    </property>
    
    <property>
      <name>hbase.hstore.compactionThreshold</name>
      <value>3</value>
    </property>
    
    <property>
      <name>hbase.local.dir</name>
      <value>${hbase.tmp.dir}/local</value>
    </property>
    
    <property>
      <name>hbase.master.info.bindAddress</name>
      <value>0.0.0.0</value>
    </property>
    
    <property>
      <name>hbase.master.info.port</name>
      <value>16010</value>
    </property>
    
    <property>
      <name>hbase.master.port</name>
      <value>16000</value>
    </property>
    
    <property>
      <name>hbase.regionserver.global.memstore.size</name>
      <value>0.4</value>
    </property>
    
    <property>
      <name>hbase.regionserver.handler.count</name>
      <value>30</value>
    </property>
    
    <property>
      <name>hbase.regionserver.info.port</name>
      <value>16030</value>
    </property>
    
    <property>
      <name>hbase.regionserver.port</name>
      <value>16020</value>
    </property>
    
    <property>
      <name>hbase.regionserver.thrift.http</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.regionserver.wal.codec</name>
      <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value>
    </property>
    
    <property>
      <name>hbase.replication</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://dev/apps/hbase/data</value>
    </property>
    
    <property>
      <name>hbase.rpc.protection</name>
      <value>authentication</value>
    </property>
    
    <property>
      <name>hbase.rpc.timeout</name>
      <value>90000</value>
    </property>
    
    <property>
      <name>hbase.security.authentication</name>
      <value>simple</value>
    </property>
    
    <property>
      <name>hbase.security.authorization</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.superuser</name>
      <value>hbase</value>
    </property>
    
    <property>
      <name>hbase.thrift.support.proxyuser</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hbase.tmp.dir</name>
      <value>/hadoop/hbase</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>node1.hde.h3c.com,node2.hde.h3c.com,node3.hde.h3c.com</value>
    </property>
    
    <property>
      <name>hbase.zookeeper.useMulti</name>
      <value>true</value>
    </property>
    
    <property>
      <name>hfile.block.cache.size</name>
      <value>0.40</value>
    </property>
    
    <property>
      <name>phoenix.query.timeoutMs</name>
      <value>60000</value>
    </property>
    
    <property>
      <name>replication.replicationsource.implementation</name>
      <value>com.ngdata.sep.impl.SepReplicationSource</value>
    </property>
    
    <property>
      <name>replication.source.nb.capacity</name>
      <value>1000</value>
    </property>
    
    <property>
      <name>replication.source.ratio</name>
      <value>1</value>
    </property>
    
    <property>
      <name>zookeeper.session.timeout</name>
      <value>90000</value>
    </property>
    
    <property>
      <name>zookeeper.znode.parent</name>
      <value>/hbase-unsecure</value>
    </property>
    
  </configuration>

+ 348 - 0
common/common-hbase/src/main/resources/hbase/hdfs-site.xml

@ -0,0 +1,348 @@
  <configuration>
    
    <property>
      <name>dfs.block.access.token.enable</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.blockreport.initialDelay</name>
      <value>120</value>
    </property>
    
    <property>
      <name>dfs.blocksize</name>
      <value>134217728</value>
    </property>
    
    <property>
      <name>dfs.client.failover.proxy.provider.dev</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    
    <property>
      <name>dfs.client.read.shortcircuit</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.client.read.shortcircuit.streams.cache.size</name>
      <value>4096</value>
    </property>
    
    <property>
      <name>dfs.client.retry.policy.enabled</name>
      <value>false</value>
    </property>
    
    <property>
      <name>dfs.cluster.administrators</name>
      <value> hdfs</value>
    </property>
    
    <property>
      <name>dfs.content-summary.limit</name>
      <value>5000</value>
    </property>
    
    <property>
      <name>dfs.datanode.address</name>
      <value>0.0.0.0:50010</value>
    </property>
    
    <property>
      <name>dfs.datanode.balance.bandwidthPerSec</name>
      <value>6250000</value>
    </property>
    
    <property>
      <name>dfs.datanode.data.dir</name>
      <value>/opt/hadoop/hdfs/data</value>
    </property>
    
    <property>
      <name>dfs.datanode.data.dir.perm</name>
      <value>750</value>
    </property>
    
    <property>
      <name>dfs.datanode.du.reserved</name>
      <value>1073741824</value>
    </property>
    
    <property>
      <name>dfs.datanode.failed.volumes.tolerated</name>
      <value>0</value>
    </property>
    
    <property>
      <name>dfs.datanode.http.address</name>
      <value>0.0.0.0:50075</value>
    </property>
    
    <property>
      <name>dfs.datanode.https.address</name>
      <value>0.0.0.0:50475</value>
    </property>
    
    <property>
      <name>dfs.datanode.ipc.address</name>
      <value>0.0.0.0:8010</value>
    </property>
    
    <property>
      <name>dfs.datanode.max.transfer.threads</name>
      <value>4096</value>
    </property>
    
    <property>
      <name>dfs.domain.socket.path</name>
      <value>/var/lib/hadoop-hdfs/dn_socket</value>
    </property>
    
    <property>
      <name>dfs.encrypt.data.transfer.cipher.suites</name>
      <value>AES/CTR/NoPadding</value>
    </property>
    
    <property>
      <name>dfs.encryption.key.provider.uri</name>
      <value></value>
    </property>
    
    <property>
      <name>dfs.ha.automatic-failover.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>shell(/bin/true)</value>
    </property>
    
    <property>
      <name>dfs.ha.namenodes.dev</name>
      <value>nn1,nn2</value>
    </property>
    
    <property>
      <name>dfs.heartbeat.interval</name>
      <value>3</value>
    </property>
    
    <property>
      <name>dfs.hosts.exclude</name>
      <value>/etc/hadoop/conf/dfs.exclude</value>
    </property>
    
    <property>
      <name>dfs.http.policy</name>
      <value>HTTP_ONLY</value>
    </property>
    
    <property>
      <name>dfs.https.port</name>
      <value>50470</value>
    </property>
    
    <property>
      <name>dfs.journalnode.edits.dir</name>
      <value>/hadoop/hdfs/journal</value>
    </property>
    
    <property>
      <name>dfs.journalnode.http-address</name>
      <value>0.0.0.0:8480</value>
    </property>
    
    <property>
      <name>dfs.journalnode.https-address</name>
      <value>0.0.0.0:8481</value>
    </property>
    
    <property>
      <name>dfs.namenode.accesstime.precision</name>
      <value>0</value>
    </property>
    
    <property>
      <name>dfs.namenode.audit.log.async</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.avoid.read.stale.datanode</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.avoid.write.stale.datanode</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.dir</name>
      <value>/opt/hadoop/hdfs/namesecondary</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.edits.dir</name>
      <value>${dfs.namenode.checkpoint.dir}</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.period</name>
      <value>21600</value>
    </property>
    
    <property>
      <name>dfs.namenode.checkpoint.txns</name>
      <value>1000000</value>
    </property>
    
    <property>
      <name>dfs.namenode.fslock.fair</name>
      <value>false</value>
    </property>
    
    <property>
      <name>dfs.namenode.handler.count</name>
      <value>100</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address</name>
      <value>node1.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address.dev.nn1</name>
      <value>node1.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.http-address.dev.nn2</name>
      <value>node2.hde.h3c.com:50070</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address</name>
      <value>node1.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address.dev.nn1</name>
      <value>node1.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.https-address.dev.nn2</name>
      <value>node2.hde.h3c.com:50470</value>
    </property>
    
    <property>
      <name>dfs.namenode.name.dir</name>
      <value>/opt/hadoop/hdfs/namenode</value>
    </property>
    
    <property>
      <name>dfs.namenode.name.dir.restore</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address</name>
      <value>node1.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address.dev.nn1</name>
      <value>node1.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.rpc-address.dev.nn2</name>
      <value>node2.hde.h3c.com:8020</value>
    </property>
    
    <property>
      <name>dfs.namenode.safemode.threshold-pct</name>
      <value>0.99</value>
    </property>
    
    <property>
      <name>dfs.namenode.secondary.http-address</name>
      <value>localhost:50090</value>
    </property>
    
    <property>
      <name>dfs.namenode.shared.edits.dir</name>
      <value>qjournal://node1.hde.h3c.com:8485;node2.hde.h3c.com:8485;node3.hde.h3c.com:8485/dev</value>
    </property>
    
    <property>
      <name>dfs.namenode.stale.datanode.interval</name>
      <value>30000</value>
    </property>
    
    <property>
      <name>dfs.namenode.startup.delay.block.deletion.sec</name>
      <value>3600</value>
    </property>
    
    <property>
      <name>dfs.namenode.write.stale.datanode.ratio</name>
      <value>1.0f</value>
    </property>
    
    <property>
      <name>dfs.nameservices</name>
      <value>dev</value>
    </property>
    
    <property>
      <name>dfs.permissions.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.permissions.superusergroup</name>
      <value>hdfs</value>
    </property>
    
    <property>
      <name>dfs.replication</name>
      <value>3</value>
    </property>
    
    <property>
      <name>dfs.replication.max</name>
      <value>50</value>
    </property>
    
    <property>
      <name>dfs.support.append</name>
      <value>true</value>
    </property>
    
    <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
    </property>
    
    <property>
      <name>fs.permissions.umask-mode</name>
      <value>022</value>
    </property>
    
    <property>
      <name>nfs.exports.allowed.hosts</name>
      <value>* rw</value>
    </property>
    
    <property>
      <name>nfs.file.dump.dir</name>
      <value>/tmp/.hdfs-nfs</value>
    </property>
    
  </configuration>

+ 0 - 23
common/common-mongodb/pom.xml

@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.yihu.jw</groupId>
        <artifactId>common-lib-parent-pom</artifactId>
        <version>1.0.0</version>
        <relativePath>../../common-lib-parent-pom/pom.xml</relativePath>
    </parent>
    <artifactId>common-mongodb</artifactId>
    <version>1.0.0</version>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-mongodb</artifactId>
        </dependency>
    </dependencies>
</project>

+ 15 - 0
common/common-redis/src/main/resources/template.yml

@ -0,0 +1,15 @@
spring:
  redis:
    host: 10.95.22.142 # Redis host IP
    port: 6380 # Redis port
    password: jkzlehr # password
    database: 0 # DB 0 is used by default
    timeout: 0 # connection timeout (ms)
    #sentinel:
    #  master: # Name of Redis server.
    #  nodes: # Comma-separated list of host:port pairs.
    pool: ## connection pool settings
      max-active: 8 # max connections in the pool (a negative value means no limit)
      max-idle: 8 # max idle connections in the pool
      max-wait: -1 # max blocking wait time when the pool is exhausted (a negative value means no limit)
      min-idle: 1 # min idle connections in the pool
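
Spring Boot's Redis auto-configuration consumes this template directly. A minimal sketch of using the resulting connection (assuming common-redis puts spring-boot-starter-data-redis on the classpath):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;

@Component
public class RedisDemo {
    @Autowired
    private StringRedisTemplate redisTemplate; // auto-configured from the yml above

    public String roundTrip() {
        // Writes to and reads from database 0 of 10.95.22.142:6380.
        redisTemplate.opsForValue().set("demo:key", "demo-value");
        return redisTemplate.opsForValue().get("demo:key");
    }
}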

+ 75 - 4
svr-lib-parent-pom/pom.xml

@ -68,14 +68,21 @@
        <version.spring-elasticsearch>2.1.3.RELEASE</version.spring-elasticsearch>
        <version.jest>2.4.0</version.jest>
        <version.json>20160212</version.json>
        <version.fastJson>1.2.17</version.fastJson>
        <version.net-json>2.4</version.net-json>
        <version.springside>4.2.3-GA</version.springside>
        <version.common.lang3>3.1</version.common.lang3>
        <version.common.lang3>3.2.1</version.common.lang3>
        <version.elasticsearch>2.4.4</version.elasticsearch>
        <version.elasticsearch-sql>2.4.1.0</version.elasticsearch-sql>
        <version.redis>1.5.3.RELEASE</version.redis>
        <version.hbase>1.1.12</version.hbase>
        <version.hadoop>2.7.4</version.hadoop>
        <scala.version>2.10.6</scala.version>
        <version.scala>2.10.6</version.scala>
        <version.hbase-client>1.1.1</version.hbase-client>
        <version.spring-data-hadoop>2.3.0.RELEASE</version.spring-data-hadoop>
    </properties>
    <!-- dependencyManagement: child modules inherit these versions and may omit them in their own dependency declarations -->
    <dependencyManagement>
@ -326,6 +333,11 @@
                <artifactId>spring-context</artifactId>
                <version>${version.spring}</version>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
                <artifactId>spring-beans</artifactId>
                <version>${version.spring}</version>
            </dependency>
            <dependency>
                <groupId>org.springframework</groupId>
@ -552,14 +564,73 @@
            <dependency>
                <groupId>org.scala-lang</groupId>
                <artifactId>scala-library</artifactId>
                <version>${scala.version}</version>
                <version>${version.scala}</version>
            </dependency>
            <dependency>
                <groupId>org.scala-lang</groupId>
                <artifactId>scala-compiler</artifactId>
                <version>${scala.version}</version>
                <version>${version.scala}</version>
            </dependency>
            <!-- scala compile end -->
            <!--elasticsearch start-->
            <dependency>
                <groupId>org.nlpcn</groupId>
                <artifactId>elasticsearch-sql</artifactId>
                <version>${version.elasticsearch-sql}</version>
            </dependency>
            <dependency>
                <groupId>org.elasticsearch</groupId>
                <artifactId>elasticsearch</artifactId>
                <version>${version.elasticsearch}</version>
            </dependency>
            <dependency>
                <groupId>io.searchbox</groupId>
                <artifactId>jest</artifactId>
                <version>${version.jest}</version>
            </dependency>
            <!--elasticsearch end-->
            <!-- Alibaba fastjson JSON library -->
            <dependency>
                <groupId>com.alibaba</groupId>
                <artifactId>fastjson</artifactId>
                <version>${version.fastJson}</version>
            </dependency>
            <!-- hbase start-->
            <dependency>
                <groupId>org.springframework.data</groupId>
                <artifactId>spring-data-hadoop-hbase</artifactId>
                <version>${version.spring-data-hadoop}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-protocol</artifactId>
                <version>${version.hbase-client}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-client</artifactId>
                <version>${version.hbase-client}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-common</artifactId>
                <version>${version.hbase-client}</version>
                <exclusions>
                    <exclusion>
                        <groupId>org.slf4j</groupId>
                        <artifactId>slf4j-log4j12</artifactId>
                    </exclusion>
                </exclusions>
            </dependency>
            <!-- hbase end-->
        </dependencies>
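
The managed jest dependency gives child modules an HTTP Elasticsearch client. A minimal usage sketch, with the index name and query being illustrative only:

import io.searchbox.client.JestClient;
import io.searchbox.core.Search;
import io.searchbox.core.SearchResult;

public class JestQueryDemo {
    // Runs a match_all query against a hypothetical "demo_index" and returns the raw JSON response.
    public String searchAll(JestClient client) throws Exception {
        String query = "{\"query\":{\"match_all\":{}}}";
        Search search = new Search.Builder(query)
                .addIndex("demo_index")
                .build();
        SearchResult result = client.execute(search);
        return result.getJsonString();
    }
}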

+ 34 - 0
svr/svr-demo/pom.xml

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.yihu.jw</groupId>
        <artifactId>svr-lib-parent-pom</artifactId>
        <version>1.0.0</version>
        <relativePath>../../svr-lib-parent-pom/pom.xml</relativePath>
    </parent>
    <artifactId>svr-demo</artifactId>
    <version>1.0.0</version>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jpa</artifactId>
        </dependency>
        <dependency>
            <groupId>com.yihu.jw</groupId>
            <artifactId>common-mysql</artifactId>
        </dependency>
        <dependency>
            <groupId>com.yihu.jw</groupId>
            <artifactId>common-quartz</artifactId>
        </dependency>
    </dependencies>
</project>
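
Because the parent pom manages versions, these dependencies declare none here. With spring-boot-starter-data-jpa and common-mysql on the classpath, persistence needs no further wiring; a minimal sketch with a hypothetical entity:

import javax.persistence.Entity;
import javax.persistence.Id;
import org.springframework.data.jpa.repository.JpaRepository;

@Entity
class DemoRecord {
    @Id
    private Long id;
    private String name; // illustrative column; getters/setters omitted for brevity
}

// Spring Data JPA derives the implementation at runtime; no DAO code is required.
interface DemoRecordRepository extends JpaRepository<DemoRecord, Long> {
}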

+ 16 - 0
svr/svr-demo/src/main/java/com/yihu/jw/DemoApplication.java

@ -0,0 +1,16 @@
package com.yihu.jw;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Created by chenweida on 2017/11/3.
 */
@SpringBootApplication
public class DemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}

+ 32 - 0
svr/svr-demo/src/main/java/com/yihu/jw/controller/DemoController.java

@ -0,0 +1,32 @@
package com.yihu.jw.controller;
import com.yihu.jw.config.quartz.QuartzHelper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.quartz.SchedulerFactoryBean;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * Created by chenweida on 2017/11/3.
 */
@RestController
public class DemoController {
    @Autowired
    SchedulerFactoryBean schedulerFactoryBean;
    @Autowired
    private QuartzHelper quartzHelper;
    // Demo endpoint: logs the name of the injected Quartz scheduler and returns a fixed payload.
    @GetMapping("demo")
    public String demo() throws Exception {
        System.out.println(schedulerFactoryBean.getScheduler().getSchedulerName());
        return "123";
    }
}
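
QuartzHelper's API is not shown in this commit, but the injected SchedulerFactoryBean already exposes the raw Quartz scheduler. A hedged sketch of registering a cron job through it; the job class, group names, and cron expression are illustrative:

import org.quartz.CronScheduleBuilder;
import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.Scheduler;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;

public class QuartzScheduleDemo {
    public static class DemoJob implements Job {
        @Override
        public void execute(JobExecutionContext context) {
            System.out.println("DemoJob fired");
        }
    }

    // Schedules DemoJob to fire at the start of every minute.
    public void schedule(Scheduler scheduler) throws Exception {
        JobDetail job = JobBuilder.newJob(DemoJob.class)
                .withIdentity("demoJob", "demoGroup")
                .build();
        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("demoTrigger", "demoGroup")
                .withSchedule(CronScheduleBuilder.cronSchedule("0 * * * * ?"))
                .build();
        scheduler.scheduleJob(job, trigger);
    }
}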

+ 31 - 0
svr/svr-demo/src/main/resources/application.yml

@ -0,0 +1,31 @@
## If a config server is present and remote and local settings conflict, the remote ones take precedence: svr-base on git -> application on git -> local application -> local bootstrap
spring:
  application:
    name:  svr-demo  ## id registered with the discovery service; Eureka automatically load-balances across instances that share an id
  datasource:
    driver-class-name: com.mysql.jdbc.Driver
    max-active: 50
    max-idle: 50 # max idle connections
    min-idle: 10 # min idle connections
    validation-query-timeout: 20
    log-validation-errors: true
    validation-interval: 60000 # avoid over-validating; connections are validated at most once per interval (ms)
    validation-query: SELECT 1 # SQL used to validate connections taken from the pool before handing them to the caller; must be a SELECT returning at least one row
    test-on-borrow: true # validate connections when borrowed; failures are evicted and another connection is tried. Requires a non-empty validation-query
    test-on-return: true # validate connections before returning them to the pool. Requires a non-empty validation-query
    idle-timeout: 30000
    connection-test-query: SELECT 1
    num-tests-per-eviction-run: 50 # number of connections checked per run of the idle-connection evictor; ideally close to max-active
    test-while-idle: true # let the idle-connection evictor validate idle connections; failures are removed from the pool
    min-evictable-idle-time-millis: 3600000 # how long a connection may sit idle before it is evicted (1000*60*60 ms)
    time-between-eviction-runs-millis: 300000 # sleep time between evictor runs (ms); usually smaller than min-evictable-idle-time-millis
    url: jdbc:mysql://172.19.103.77/wlyy?useUnicode=true&characterEncoding=utf-8&autoReconnect=true
    username: root
    password: 123456
quartz:
  namespace: svr-demo ## Quartz namespace; instances that share a namespace balance job consumption between them
  overwriteExistingJobs: true ## whether to overwrite existing jobs
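
These quartz values can be bound back into code with plain @Value injections; a minimal sketch, with property names matching the yml above:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

@Component
public class QuartzSettings {
    // Instances that share a namespace balance job consumption between them.
    @Value("${quartz.namespace}")
    private String namespace;

    @Value("${quartz.overwriteExistingJobs}")
    private boolean overwriteExistingJobs;

    public String getNamespace() {
        return namespace;
    }

    public boolean isOverwriteExistingJobs() {
        return overwriteExistingJobs;
    }
}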