security-jwt2 is a general-purpose, RBAC-based permission management system built as a front-end/back-end separated project. It implements many common features and can be used out of the box.
docker build -t backend:v1.0.0 .
docker run -d -p 8188:8188 backend:v1.0.0
@Test
public void importOperationLog2Es01() throws IOException {
long start = System.currentTimeMillis();
BulkRequest bulkRequest = new BulkRequest();
// Query all operation log records
List<OperationLog> operationLogList = operationLogService.lambdaQuery().list();
for (OperationLog operationLog : operationLogList) {
IndexRequest indexRequest = new IndexRequest(OPER_LOG_INDEX);
indexRequest.id(operationLog.getId().toString());
Map<String, Object> sources = new ConcurrentHashMap<>();
sources.put("username", operationLog.getUsername());
sources.put("type", operationLog.getType());
sources.put("uri", operationLog.getUri());
sources.put("time", operationLog.getTime());
sources.put("ip", operationLog.getIp());
sources.put("address",operationLog.getAddress());
sources.put("browser", operationLog.getBrowser());
sources.put("os", operationLog.getOs());
sources.put("operTime", operationLog.getOperTime());
sources.put("delFlag", operationLog.getDelFlag());
indexRequest.source(sources);
bulkRequest.add(indexRequest);
}
restHighLevelClient.bulk(bulkRequest,RequestOptions.DEFAULT);
long end = System.currentTimeMillis();
System.out.println((end-start)+"ms");
}
@Test
public void importOperationLog2Es02() throws IOException, InterruptedException {
long start = System.currentTimeMillis();
ThreadPoolExecutor threadPoolExecutor
= new ThreadPoolExecutor(10,
20,
2L,
TimeUnit.SECONDS,
new LinkedBlockingDeque<>(),
Executors.defaultThreadFactory(),
new ThreadPoolExecutor.AbortPolicy());
// Total number of records
long count = operationLogService.count();
// Page size
int size = 1000;
// Number of batches; if count is not an exact multiple of size (e.g. count=132760), add one extra batch for the remainder
long circle = (count % size == 0) ? count / size : (count / size) + 1;
// CountDownLatch from java.util.concurrent, one count per batch
CountDownLatch countDownLatch = new CountDownLatch((int) circle);
for (long i = 1; i < circle+1; i++) {
int page = (int) ((i - 1) * size); // offset of this batch
threadPoolExecutor.submit(()->{
try {
List<OperationLog> operationLogList = operationLogMapper.selectAllOperationLogByLimit(page, size);
BulkRequest bulkRequest = new BulkRequest();
for (OperationLog operationLog : operationLogList) {
IndexRequest indexRequest = new IndexRequest(OPER_LOG_INDEX);
indexRequest.id(operationLog.getId().toString());
Map<String, Object> sources = new ConcurrentHashMap<>();
sources.put("username", operationLog.getUsername());
sources.put("type", operationLog.getType());
sources.put("uri", operationLog.getUri());
sources.put("time", operationLog.getTime());
sources.put("ip", operationLog.getIp());
sources.put("address",operationLog.getAddress());
sources.put("browser", operationLog.getBrowser());
sources.put("os", operationLog.getOs());
sources.put("operTime", operationLog.getOperTime());
sources.put("delFlag", operationLog.getDelFlag());
indexRequest.source(sources);
bulkRequest.add(indexRequest);
}
restHighLevelClient.bulk(bulkRequest,RequestOptions.DEFAULT);
}catch (Exception e){
e.printStackTrace();
}
finally {
// Decrement the counter
countDownLatch.countDown();
}
});
}
// Block the main thread until the counter reaches 0, so the program does not exit while the worker threads are still running.
countDownLatch.await();
// End time
long end = System.currentTimeMillis();
System.out.println((end-start)+"ms");
}
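One small follow-up to the test above: the executor is never shut down, so its core worker threads stay alive after the import finishes. If that matters outside a throwaway test, a minimal addition right after countDownLatch.await() could look like this (a sketch reusing the variable names above):
// Stop accepting new tasks; already-submitted batches keep running.
threadPoolExecutor.shutdown();
// Wait a bounded time for the remaining batches to drain, then force-stop if needed.
if (!threadPoolExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
threadPoolExecutor.shutdownNow();
}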
With pagination parameters LIMIT 130000,7, compare the following two queries:
SELECT id, username, type, uri, time, ip, address, browser, os, oper_time,del_flag
FROM sys_oper_log
WHERE del_flag = 0
ORDER BY id DESC
LIMIT 130000,7
SELECT id, username, type, uri, time, ip, address, browser, os, oper_time,del_flag
FROM sys_oper_log
WHERE id <![CDATA[<=]]> (SELECT id FROM sys_oper_log ORDER BY id DESC LIMIT 130000,1)
AND del_flag = 0
ORDER BY id DESC
LIMIT 7
With the same pagination parameters, the performance gap between the two queries above is huge. The second one uses a subquery that walks only the primary-key index to locate the boundary id, which avoids the main weakness of a plain LIMIT offset query, namely reading and discarding all 130,000 preceding rows. The larger the data set, the wider this gap becomes.
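For context, the multi-threaded test above pages through the table with operationLogMapper.selectAllOperationLogByLimit(page, size); the <![CDATA[...]]> form of the optimized SQL suggests the project defines it in an XML mapper. The annotation-based sketch below is purely for illustration, and the parameter names are assumptions, not taken from the project:
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.boot.entity.OperationLog;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import java.util.List;
public interface OperationLogMapper extends BaseMapper<OperationLog> {
// Keyset-style deep pagination: the subquery walks only the primary-key index to find the
// boundary id, so the outer query never has to read and discard the first `offset` rows.
@Select("SELECT id, username, type, uri, time, ip, address, browser, os, oper_time, del_flag " +
"FROM sys_oper_log " +
"WHERE id <= (SELECT id FROM sys_oper_log ORDER BY id DESC LIMIT #{offset}, 1) " +
"AND del_flag = 0 " +
"ORDER BY id DESC " +
"LIMIT #{size}")
List<OperationLog> selectAllOperationLogByLimit(@Param("offset") int offset, @Param("size") int size);
}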
SELECT
sys_user.id,
sys_user.user_name,
sys_user.nick_name,
sys_user.status,
sys_user.avatar,
sys_user.email,
sys_user.phone,
sys_user.sex,
sys_user.create_time,
sys_user.update_time,
sys_user.del_flag
FROM sys_user
WHERE id >= (SELECT id FROM sys_user ORDER BY id LIMIT 100,1)
AND del_flag = 0
ORDER BY id ASC
LIMIT 7
Caused by: ElasticsearchException[Elasticsearch exception [type=illegal_argument_exception, reason=Result window is too large, from + size must be less than or equal to: [10000] but was [132734]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.]]; nested: ElasticsearchException[Elasticsearch exception [type=illegal_argument_exception, reason=Result window is too large, from + size must be less than or equal to: [10000] but was [132734]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.]];
curl -H "Content-Type: application/json" -XPUT http://192.168.184.123:9200/operation-log-index/_settings -d '{"index.blocks": {"read_only_allow_delete": null}}'
curl -H "Content-Type: application/json" -XPUT http://192.168.184.123:9200/operation-log-index/_settings -d '{"index.max_result_window":"50000000"}'
Can not set java.time.LocalDateTime field com.boot.entity.OperationLog.operTime to null value
The canal client's convertType method has no branch for LocalDateTime, so an unrecognized type falls through to the last branch, type.equals(java.sql.Date.class) ? parseDate(columnValue) : columnValue, and columnValue is a String. We can therefore use a String field to receive the LocalDateTime value. For reference, convertType looks like this:
static Object convertType(Class<?> type, String columnValue) {
if (columnValue == null) {
return null;
} else if (type.equals(Integer.class)) {
return Integer.parseInt(columnValue);
} else if (type.equals(Long.class)) {
return Long.parseLong(columnValue);
} else if (type.equals(Boolean.class)) {
return convertToBoolean(columnValue);
} else if (type.equals(BigDecimal.class)) {
return new BigDecimal(columnValue);
} else if (type.equals(Double.class)) {
return Double.parseDouble(columnValue);
} else if (type.equals(Float.class)) {
return Float.parseFloat(columnValue);
} else if (type.equals(Date.class)) {
return parseDate(columnValue);
} else {
return type.equals(java.sql.Date.class) ? parseDate(columnValue) : columnValue;
}
}
There are two solutions:
(1) First solution:
1: Create an intermediary class. The error comes from the LocalDateTime field in the OperationLog class, so we can create a new class that keeps all the other fields unchanged and declares that date field as String, which resolves the bug.
package com.boot.entity;
import com.alibaba.excel.annotation.ExcelProperty;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableLogic;
import com.baomidou.mybatisplus.annotation.TableName;
import com.boot.converter.DelFlagConverter;
import com.boot.converter.LocalDateTimeConverter;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import io.swagger.annotations.ApiModelProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.Column;
import java.io.Serializable;
import java.time.LocalDateTime;
/**
* Canal intermediary class for operation logs. Works around the error caused by canal being unable to populate the LocalDateTime field of the OperationLog class
*
* @author youzhengjie
* @date 2022/10/30 21:16:54
*/
@Data
@AllArgsConstructor
@NoArgsConstructor
@JsonInclude(JsonInclude.Include.NON_NULL)
@Builder
public class OperationLogCanal implements Serializable {
private static final long serialVersionUID = 1L;
private Long id;
private String username;
private String type;
private String uri;
private String time;
private String ip;
private String address;
private String browser;
private String os;
// With canal + Spring Boot, when the property name differs from the database column name, use @Column to specify the column name, otherwise the value from canal will not be received
@Column(name = "oper_time")
private String operTime;
// With canal + Spring Boot, when the property name differs from the database column name, use @Column to specify the column name, otherwise the value from canal will not be received
@Column(name = "del_flag")
private Integer delFlag;
}
package com.boot.service;
import com.baomidou.mybatisplus.extension.service.IService;
import com.boot.entity.OperationLog;
import java.util.List;
/**
* Operation log service
*
* @author youzhengjie
* @date 2022/10/21 23:32:14
*/
public interface OperationLogService extends IService<OperationLog> {
long selectAllOperationLogCount();
/**
* Add an operation log record to Elasticsearch
*
* @param operationLog the operation log
* @return boolean
*/
boolean addOperationLogToEs(OperationLog operationLog);
/**
* Delete the operation log record with the given id from Elasticsearch
*
* @param id the id
* @return boolean
*/
boolean deleteOperationLogToEs(Long id);
/**
* Update an operation log record in Elasticsearch
*
* @param operationLog the operation log
* @return boolean
*/
boolean updateOperationLogToEs(OperationLog operationLog);
}
package com.boot.service.impl;
import cn.hutool.core.bean.BeanUtil;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.boot.entity.OperationLog;
import com.boot.mapper.OperationLogMapper;
import com.boot.service.OperationLogService;
import lombok.extern.slf4j.Slf4j;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Operation log service implementation
*
* @author youzhengjie
* @date 2022/10/21 23:42:49
*/
@Service
@Slf4j
public class OperationLogServiceImpl extends ServiceImpl<OperationLogMapper, OperationLog> implements OperationLogService {
@Autowired
private OperationLogMapper operationLogMapper;
@Autowired
private RestHighLevelClient restHighLevelClient;
/**
* Elasticsearch index name for operation logs
*/
private static final String OPER_LOG_INDEX="operation-log-index";
@Override
public boolean addOperationLogToEs(OperationLog operationLog) {
try {
IndexRequest indexRequest = new IndexRequest(OPER_LOG_INDEX);
indexRequest.id(operationLog.getId().toString());
Map<String, Object> sources = new ConcurrentHashMap<>();
sources.put("username", operationLog.getUsername());
sources.put("type", operationLog.getType());
sources.put("uri", operationLog.getUri());
sources.put("time", operationLog.getTime());
sources.put("ip", operationLog.getIp());
sources.put("address",operationLog.getAddress());
sources.put("browser", operationLog.getBrowser());
sources.put("os", operationLog.getOs());
sources.put("operTime", operationLog.getOperTime());
sources.put("delFlag", operationLog.getDelFlag());
indexRequest.source(sources);
restHighLevelClient.index(indexRequest,RequestOptions.DEFAULT);
return true;
}catch (Exception e){
e.printStackTrace();
return false;
}
}
@Override
public boolean deleteOperationLogToEs(Long id) {
try {
DeleteRequest deleteRequest = new DeleteRequest(OPER_LOG_INDEX);
deleteRequest.id(id.toString());
restHighLevelClient.delete(deleteRequest,RequestOptions.DEFAULT);
return true;
}catch (Exception e) {
e.printStackTrace();
return false;
}
}
@Override
public boolean updateOperationLogToEs(OperationLog operationLog) {
try {
// Wrap operationLog into a Map
Map<String,Object> operationLogMap=new ConcurrentHashMap<>();
// Copy the operationLog properties into the Map
BeanUtil.copyProperties(operationLog,operationLogMap);
// Remove the id from the Map; it is passed separately as the document id below
operationLogMap.remove("id");
String idStr = operationLog.getId().toString();
UpdateRequest updateRequest = new UpdateRequest(OPER_LOG_INDEX,idStr);
updateRequest.doc(operationLogMap);
restHighLevelClient.update(updateRequest,RequestOptions.DEFAULT);
return true;
}catch (Exception e){
e.printStackTrace();
return false;
}
}
}
package com.boot.canal;
import cn.hutool.core.bean.BeanUtil;
import com.boot.entity.OperationLog;
import com.boot.entity.OperationLogCanal;
import com.boot.service.OperationLogService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import top.javatool.canal.client.annotation.CanalTable;
import top.javatool.canal.client.handler.EntryHandler;
/**
* Canal entry handler for operation logs
*
* @author youzhengjie
* @date 2022/10/30 15:19:09
*/
@CanalTable("sys_oper_log") //@CanalTable("sys_oper_log"):指定canal监听的表名为sys_oper_log
@Component
@Slf4j
public class OperationLogCanalHandle implements EntryHandler<OperationLogCanal> {
@Autowired
private OperationLogService operationLogService;
@Override
public void insert(OperationLogCanal operationLogCanal) {
// Put the MySQL-to-cache synchronization logic here (e.g. JVM local cache, Redis, Elasticsearch)
OperationLog operationLog = new OperationLog();
// Bean copy
BeanUtil.copyProperties(operationLogCanal,operationLog);
// Sync to Elasticsearch
operationLogService.addOperationLogToEs(operationLog);
log.warn("OperationLogCanalHandle->insert->开始同步->"+operationLog);
}
/**
* Update
*
* @param before the row state before the update
* @param after the row state after the update
*/
@Override
public void update(OperationLogCanal before, OperationLogCanal after) {
// Put the MySQL-to-cache synchronization logic here (e.g. JVM local cache, Redis, Elasticsearch)
OperationLog operationLog = new OperationLog();
// Note: copy the after object, which holds the row state after the update
BeanUtil.copyProperties(after,operationLog);
// Sync to Elasticsearch
operationLogService.updateOperationLogToEs(operationLog);
log.warn("OperationLogCanalHandle->update->开始同步->"+operationLog);
}
@Override
public void delete(OperationLogCanal operationLogCanal) {
// Put the MySQL-to-cache synchronization logic here (e.g. JVM local cache, Redis, Elasticsearch)
Long id = operationLogCanal.getId();
// Sync to Elasticsearch
operationLogService.deleteOperationLogToEs(id);
log.warn("OperationLogCanalHandle->delete->开始同步->"+id);
}
}
@Override
public String buildTreeByUserId(long userid){
try {
// Query all menus for this user
List<Menu> allMenu = menuService.getMenuListByUserId(userid);
// Root nodes
List<Menu> rootMenu = new ArrayList<Menu>();
for (Menu nav : allMenu) {
if(nav.getParentId()==0){ // a parentId of 0 marks a root node
rootMenu.add(nav);
}
}
/* Sort the root menus by the Menu order field */
Collections.sort(rootMenu);
// Attach children to each root menu; getChild is called recursively
for (Menu nav : rootMenu) {
/* Get all child nodes under this root node via getChild */
List<Menu> childList = getChild(nav.getId(), allMenu);
nav.setChildren(childList); // set the children of this root node
}
Menu dashboardMenu = Menu.builder()
.id(-66L)
.menuName("仪表盘")
.path("/dashboard")
.icon("el-icon-s-home")
.component("../views/dashboard/index")
.children(new ArrayList<>())
.build();
rootMenu.add(0,dashboardMenu);
return JSON.toJSONString(rootMenu);
} catch (Exception e) {
return null;
}
}
private List<Menu> getChild(long id,List<Menu> allMenu){
// Child menus
List<Menu> childList = new ArrayList<Menu>();
for (Menu nav : allMenu) {
// Compare each menu's parentId with the id passed in;
// if they are equal, that menu is a child of this node.
if(nav.getParentId().equals(id)){
childList.add(nav);
}
}
// Recurse
for (Menu nav : childList) {
nav.setChildren(getChild(nav.getId(), allMenu));
}
Collections.sort(childList); // sort by the Menu order field
// If the node has no children, return an empty list (the recursion bottoms out here)
if(childList.size() == 0){
return new ArrayList<Menu>();
}
return childList;
}
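A note on the two Collections.sort calls above: they only compile because Menu implements Comparable, and, per the comments, the ordering comes from the menu's order field. The project's Menu class is not shown here, so the following is only a sketch of what that contract presumably looks like; the field name orderNum is an assumption:
public class Menu implements Comparable<Menu> {
// ... id, parentId, menuName, path, icon, component, children and the other fields ...
private Integer orderNum; // assumed name of the display-order field
@Override
public int compareTo(Menu other) {
// Sort menus by display order, ascending; treat a missing order as 0.
int self = this.orderNum == null ? 0 : this.orderNum;
int that = other.orderNum == null ? 0 : other.orderNum;
return Integer.compare(self, that);
}
}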
Wrong approach (this alone does not make the v-model update in real time):
this.selectNode.menuName=res.data.data
Correct approach:
this.selectNode.menuName=res.data.data
this.$forceUpdate() // force the v-model binding to refresh
Solution: use store._modules.root._children.user.state.dynamicRouter to access dynamicRouter.
Solution (note: the constant prefix below must be '../views'; do not write the '/views' part into the component value itself, otherwise the module still cannot be found): component: resolve => require(['../views'+menu.component], resolve)
npm i vue-router@3
npm i vuex@3
npm install --save axios vue-axios
npm i element-ui -S
npm i less less-loader@7
npm install js-cookie --save
npm install vue-fragment --save
npm i -S nprogress
npm install echarts --save
npm install
npm run serve
npm run build
npm run lint