RocketMQ: Source Code Analysis of PullConsumer Startup

Date: 2023-03-09 00:06:12

DefaultMQPullConsumer is the default implementation. Its startup process is very similar to that of the Producer, but somewhat more complex.

【RocketMQ: Source Code Analysis of Producer Startup】

The constructor of DefaultMQPullConsumer:

public DefaultMQPullConsumer(final String consumerGroup, RPCHook rpcHook) {
    this.consumerGroup = consumerGroup;
    defaultMQPullConsumerImpl = new DefaultMQPullConsumerImpl(this, rpcHook);
}

This wraps a DefaultMQPullConsumerImpl, analogous to DefaultMQProducerImpl on the Producer side.

DefaultMQPullConsumerImpl:

public class DefaultMQPullConsumerImpl implements MQConsumerInner {
    private final InternalLogger log = ClientLogger.getLog();
    private final DefaultMQPullConsumer defaultMQPullConsumer;
    private final long consumerStartTimestamp = System.currentTimeMillis();
    private final RPCHook rpcHook;
    private final ArrayList<ConsumeMessageHook> consumeMessageHookList = new ArrayList<ConsumeMessageHook>();
    private final ArrayList<FilterMessageHook> filterMessageHookList = new ArrayList<FilterMessageHook>();
    private volatile ServiceState serviceState = ServiceState.CREATE_JUST;
    private MQClientInstance mQClientFactory;
    private PullAPIWrapper pullAPIWrapper;
    private OffsetStore offsetStore;
    private RebalanceImpl rebalanceImpl = new RebalancePullImpl(this);

    public DefaultMQPullConsumerImpl(final DefaultMQPullConsumer defaultMQPullConsumer, final RPCHook rpcHook) {
        this.defaultMQPullConsumer = defaultMQPullConsumer;
        this.rpcHook = rpcHook;
    }
    ......
}

These are the fields it carries; they will be explained in detail as we encounter them later.

DefaultMQPullConsumer's start method actually delegates to the start method of DefaultMQPullConsumerImpl.
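
For orientation, a pull consumer is typically set up and started roughly like the minimal sketch below (the group name, NameServer address, and topic are made-up values, not taken from the source):

import java.util.Collections;
import java.util.HashSet;

import org.apache.rocketmq.client.consumer.DefaultMQPullConsumer;

public class PullConsumerStartDemo {
    public static void main(String[] args) throws Exception {
        DefaultMQPullConsumer consumer = new DefaultMQPullConsumer("example_consumer_group");
        consumer.setNamesrvAddr("127.0.0.1:9876");
        // Topics put into registerTopics here are copied into the rebalance
        // implementation by copySubscription() during start() (see below).
        consumer.setRegisterTopics(new HashSet<String>(Collections.singletonList("TopicTest")));
        consumer.start();   // delegates to DefaultMQPullConsumerImpl#start
        // ... pull messages, then:
        consumer.shutdown();
    }
}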

The start method of DefaultMQPullConsumerImpl:

public synchronized void start() throws MQClientException {
    switch (this.serviceState) {
        case CREATE_JUST:
            this.serviceState = ServiceState.START_FAILED;

            this.checkConfig();

            this.copySubscription();

            if (this.defaultMQPullConsumer.getMessageModel() == MessageModel.CLUSTERING) {
                this.defaultMQPullConsumer.changeInstanceNameToPID();
            }

            this.mQClientFactory = MQClientManager.getInstance().getAndCreateMQClientInstance(this.defaultMQPullConsumer, this.rpcHook);

            this.rebalanceImpl.setConsumerGroup(this.defaultMQPullConsumer.getConsumerGroup());
            this.rebalanceImpl.setMessageModel(this.defaultMQPullConsumer.getMessageModel());
            this.rebalanceImpl.setAllocateMessageQueueStrategy(this.defaultMQPullConsumer.getAllocateMessageQueueStrategy());
            this.rebalanceImpl.setmQClientFactory(this.mQClientFactory);

            this.pullAPIWrapper = new PullAPIWrapper(
                mQClientFactory,
                this.defaultMQPullConsumer.getConsumerGroup(), isUnitMode());
            this.pullAPIWrapper.registerFilterMessageHook(filterMessageHookList);

            if (this.defaultMQPullConsumer.getOffsetStore() != null) {
                this.offsetStore = this.defaultMQPullConsumer.getOffsetStore();
            } else {
                switch (this.defaultMQPullConsumer.getMessageModel()) {
                    case BROADCASTING:
                        this.offsetStore = new LocalFileOffsetStore(this.mQClientFactory, this.defaultMQPullConsumer.getConsumerGroup());
                        break;
                    case CLUSTERING:
                        this.offsetStore = new RemoteBrokerOffsetStore(this.mQClientFactory, this.defaultMQPullConsumer.getConsumerGroup());
                        break;
                    default:
                        break;
                }
                this.defaultMQPullConsumer.setOffsetStore(this.offsetStore);
            }

            this.offsetStore.load();

            boolean registerOK = mQClientFactory.registerConsumer(this.defaultMQPullConsumer.getConsumerGroup(), this);
            if (!registerOK) {
                this.serviceState = ServiceState.CREATE_JUST;
                throw new MQClientException("The consumer group[" + this.defaultMQPullConsumer.getConsumerGroup()
                    + "] has been created before, specify another name please." + FAQUrl.suggestTodo(FAQUrl.GROUP_NAME_DUPLICATE_URL),
                    null);
            }

            mQClientFactory.start();
            log.info("the consumer [{}] start OK", this.defaultMQPullConsumer.getConsumerGroup());
            this.serviceState = ServiceState.RUNNING;
            break;
        case RUNNING:
        case START_FAILED:
        case SHUTDOWN_ALREADY:
            throw new MQClientException("The PullConsumer service state not OK, maybe started once, "
                + this.serviceState
                + FAQUrl.suggestTodo(FAQUrl.CLIENT_SERVICE_NOT_OK),
                null);
        default:
            break;
    }
}

First, the checkConfig method validates the consumer's configuration.

Next comes the copySubscription method:

private void copySubscription() throws MQClientException {
    try {
        Set<String> registerTopics = this.defaultMQPullConsumer.getRegisterTopics();
        if (registerTopics != null) {
            for (final String topic : registerTopics) {
                SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(this.defaultMQPullConsumer.getConsumerGroup(),
                    topic, SubscriptionData.SUB_ALL);
                this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
            }
        }
    } catch (Exception e) {
        throw new MQClientException("subscription exception", e);
    }
}

Here, registerTopics is the set of topics that the user registered via setRegisterTopics.
Each topic in the set is wrapped into a SubscriptionData and stored in rebalanceImpl.

SubscriptionData:

public class SubscriptionData implements Comparable<SubscriptionData> {
    public final static String SUB_ALL = "*";
    private boolean classFilterMode = false;
    private String topic;
    private String subString;
    private Set<String> tagsSet = new HashSet<String>();
    private Set<Integer> codeSet = new HashSet<Integer>();
    private long subVersion = System.currentTimeMillis();
    private String expressionType = ExpressionType.TAG;
    ......
}

RebalanceImpl:

public abstract class RebalanceImpl {
    protected final ConcurrentMap<MessageQueue, ProcessQueue> processQueueTable = new ConcurrentHashMap<MessageQueue, ProcessQueue>(64);
    protected final ConcurrentMap<String/* topic */, Set<MessageQueue>> topicSubscribeInfoTable =
        new ConcurrentHashMap<String, Set<MessageQueue>>();
    protected final ConcurrentMap<String /* topic */, SubscriptionData> subscriptionInner =
        new ConcurrentHashMap<String, SubscriptionData>();
    protected String consumerGroup;
    protected MessageModel messageModel;
    protected AllocateMessageQueueStrategy allocateMessageQueueStrategy;
    protected MQClientInstance mQClientFactory;
    ......
}

Back in the start method, an MQClientInstance is then obtained through MQClientManager, just as in the Producer,
and the fields of rebalanceImpl are populated.

Next, a PullAPIWrapper is instantiated and the filter-message hooks are registered with it; this object will be covered in detail when message pulling is analyzed.

Then, depending on the message model, a different kind of OffsetStore is chosen:

public enum MessageModel {
    /**
     * broadcast
     */
    BROADCASTING("BROADCASTING"),
    /**
     * clustering
     */
    CLUSTERING("CLUSTERING");
    ......
}

These are the broadcasting model and the clustering model.
Broadcasting (BROADCASTING): every Consumer in the same ConsumerGroup receives all messages of the subscribed topics, i.e. each message is delivered multiple times and consumed by multiple Consumers.
Clustering (CLUSTERING): each Consumer in the same ConsumerGroup consumes only a portion of the subscribed messages; only the union of what all Consumers in the group consume covers the whole topic.

In broadcasting mode, the consumer's consumption progress (offset) is stored locally; in clustering mode, it is stored remotely on the Broker.
Hence broadcasting uses LocalFileOffsetStore, and clustering uses RemoteBrokerOffsetStore.
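
The model is a consumer-side setting; a minimal sketch of switching it (on a DefaultMQPullConsumer instance like the one in the earlier sketch) would be:

// Default is CLUSTERING; switch to BROADCASTING so every consumer in the group
// receives all messages and offsets are kept in a local offsets.json file.
// MessageModel is org.apache.rocketmq.common.protocol.heartbeat.MessageModel.
consumer.setMessageModel(MessageModel.BROADCASTING);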

In broadcasting mode, i.e. with LocalFileOffsetStore, calling load reads the local offsets.json file; for RemoteBrokerOffsetStore, load is an empty method that does nothing.
The load method of LocalFileOffsetStore:

public void load() throws MQClientException {
    OffsetSerializeWrapper offsetSerializeWrapper = this.readLocalOffset();
    if (offsetSerializeWrapper != null && offsetSerializeWrapper.getOffsetTable() != null) {
        offsetTable.putAll(offsetSerializeWrapper.getOffsetTable());

        for (MessageQueue mq : offsetSerializeWrapper.getOffsetTable().keySet()) {
            AtomicLong offset = offsetSerializeWrapper.getOffsetTable().get(mq);
            log.info("load consumer's offset, {} {} {}",
                this.groupName,
                mq,
                offset.get());
        }
    }
}

The readLocalOffset method deserializes the JSON string in the offsets.json file into an OffsetSerializeWrapper object:

public class OffsetSerializeWrapper extends RemotingSerializable {
    private ConcurrentMap<MessageQueue, AtomicLong> offsetTable =
        new ConcurrentHashMap<MessageQueue, AtomicLong>();

    public ConcurrentMap<MessageQueue, AtomicLong> getOffsetTable() {
        return offsetTable;
    }

    public void setOffsetTable(ConcurrentMap<MessageQueue, AtomicLong> offsetTable) {
        this.offsetTable = offsetTable;
    }
}

From this you can get a rough idea of what the JSON file contains: each AtomicLong is the offset for the corresponding MessageQueue.
The load method then copies this map into the offsetTable of the LocalFileOffsetStore.
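
For reference, the persisted file is simply this wrapper serialized to JSON; on disk it looks roughly like the sketch below (topic, broker name, and offset values are made up for illustration):

{
    "offsetTable":{{
            "brokerName":"broker-a",
            "queueId":0,
            "topic":"TopicTest"
        }:6,{
            "brokerName":"broker-a",
            "queueId":1,
            "topic":"TopicTest"
        }:8
    }
}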

Next, mQClientFactory's start method is called; this method was already analyzed in 【RocketMQ: Source Code Analysis of Producer Startup】:

public void start() throws MQClientException {
    synchronized (this) {
        switch (this.serviceState) {
            case CREATE_JUST:
                this.serviceState = ServiceState.START_FAILED;
                // If not specified,looking address from name server
                if (null == this.clientConfig.getNamesrvAddr()) {
                    this.mQClientAPIImpl.fetchNameServerAddr();
                }
                // Start request-response channel
                this.mQClientAPIImpl.start();
                // Start various schedule tasks
                this.startScheduledTask();
                // Start pull service
                this.pullMessageService.start();
                // Start rebalance service
                this.rebalanceService.start();
                // Start push service
                this.defaultMQProducer.getDefaultMQProducerImpl().start(false);
                log.info("the client factory [{}] start OK", this.clientId);
                this.serviceState = ServiceState.RUNNING;
                break;
            case RUNNING:
                break;
            case SHUTDOWN_ALREADY:
                break;
            case START_FAILED:
                throw new MQClientException("The Factory object[" + this.getClientId() + "] has been created before, and failed.", null);
            default:
                break;
        }
    }
}

First, if no NameServer address has been set, fetchNameServerAddr is called to look it up automatically; see the Producer startup analysis.

Then mQClientAPIImpl's start method sets up the Netty client; again, see the Producer startup analysis.

The startScheduledTask method sets up five scheduled tasks:
① if the NameServer address namesrvAddr is not set, periodically call the fetchNameServerAddr method mentioned above to refresh it
② periodically update the route information of the subscribed topics
③ periodically remove offline Brokers and send heartbeats to the Brokers that are currently online
(the above are covered in the Producer startup analysis)

④ periodically persist the consumption offsets of the consumer's queues
The implementation in DefaultMQPullConsumerImpl:

public void persistConsumerOffset() {
    try {
        this.makeSureStateOK();
        Set<MessageQueue> mqs = new HashSet<MessageQueue>();
        Set<MessageQueue> allocateMq = this.rebalanceImpl.getProcessQueueTable().keySet();
        mqs.addAll(allocateMq);
        this.offsetStore.persistAll(mqs);
    } catch (Exception e) {
        log.error("group: " + this.defaultMQPullConsumer.getConsumerGroup() + " persistConsumerOffset exception", e);
    }
}

It first collects from rebalanceImpl the set of all MessageQueues currently being processed,
then hands that set to offsetStore's persistAll method.
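
As a side note, the interval of this scheduled persistence is taken from ClientConfig (persistConsumerOffsetInterval, 5 seconds by default as far as I can tell) and can be tuned on the consumer instance from the earlier sketch, e.g.:

// Hypothetical tuning: persist consumption offsets every 10 seconds instead of the default 5.
consumer.setPersistConsumerOffsetInterval(10 * 1000);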

Because of the two message models, there are two implementations here.
The persistAll method of LocalFileOffsetStore (broadcasting mode):

public void persistAll(Set<MessageQueue> mqs) {
    if (null == mqs || mqs.isEmpty())
        return;

    OffsetSerializeWrapper offsetSerializeWrapper = new OffsetSerializeWrapper();
    for (Map.Entry<MessageQueue, AtomicLong> entry : this.offsetTable.entrySet()) {
        if (mqs.contains(entry.getKey())) {
            AtomicLong offset = entry.getValue();
            offsetSerializeWrapper.getOffsetTable().put(entry.getKey(), offset);
        }
    }

    String jsonString = offsetSerializeWrapper.toJson(true);
    if (jsonString != null) {
        try {
            MixAll.string2File(jsonString, this.storePath);
        } catch (IOException e) {
            log.error("persistAll consumer offset Exception, " + this.storePath, e);
        }
    }
}

This is the reverse of the load method seen earlier: the offsets of the given MessageQueues are serialized and written back, replacing the previous content of the JSON file.
That completes the periodic persistence of consumption offsets in broadcasting mode.

The persistAll implementation of RemoteBrokerOffsetStore (clustering mode):

public void persistAll(Set<MessageQueue> mqs) {
    if (null == mqs || mqs.isEmpty())
        return;

    final HashSet<MessageQueue> unusedMQ = new HashSet<MessageQueue>();
    if (!mqs.isEmpty()) {
        for (Map.Entry<MessageQueue, AtomicLong> entry : this.offsetTable.entrySet()) {
            MessageQueue mq = entry.getKey();
            AtomicLong offset = entry.getValue();
            if (offset != null) {
                if (mqs.contains(mq)) {
                    try {
                        this.updateConsumeOffsetToBroker(mq, offset.get());
                        log.info("[persistAll] Group: {} ClientId: {} updateConsumeOffsetToBroker {} {}",
                            this.groupName,
                            this.mQClientFactory.getClientId(),
                            mq,
                            offset.get());
                    } catch (Exception e) {
                        log.error("updateConsumeOffsetToBroker exception, " + mq.toString(), e);
                    }
                } else {
                    unusedMQ.add(mq);
                }
            }
        }
    }

    if (!unusedMQ.isEmpty()) {
        for (MessageQueue mq : unusedMQ) {
            this.offsetTable.remove(mq);
            log.info("remove unused mq, {}, {}", mq, this.groupName);
        }
    }
}

Similar to the previous version, this iterates over offsetTable, but instead of writing to a local file it sends each offset to the Broker via updateConsumeOffsetToBroker.
The updateConsumeOffsetToBroker method:

private void updateConsumeOffsetToBroker(MessageQueue mq, long offset) throws RemotingException,
    MQBrokerException, InterruptedException, MQClientException {
    updateConsumeOffsetToBroker(mq, offset, true);
}

public void updateConsumeOffsetToBroker(MessageQueue mq, long offset, boolean isOneway) throws RemotingException,
    MQBrokerException, InterruptedException, MQClientException {
    FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInAdmin(mq.getBrokerName());
    if (null == findBrokerResult) {
        this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic());
        findBrokerResult = this.mQClientFactory.findBrokerAddressInAdmin(mq.getBrokerName());
    }

    if (findBrokerResult != null) {
        UpdateConsumerOffsetRequestHeader requestHeader = new UpdateConsumerOffsetRequestHeader();
        requestHeader.setTopic(mq.getTopic());
        requestHeader.setConsumerGroup(this.groupName);
        requestHeader.setQueueId(mq.getQueueId());
        requestHeader.setCommitOffset(offset);

        if (isOneway) {
            this.mQClientFactory.getMQClientAPIImpl().updateConsumerOffsetOneway(
                findBrokerResult.getBrokerAddr(), requestHeader, 1000 * 5);
        } else {
            this.mQClientFactory.getMQClientAPIImpl().updateConsumerOffset(
                findBrokerResult.getBrokerAddr(), requestHeader, 1000 * 5);
        }
    } else {
        throw new MQClientException("The broker[" + mq.getBrokerName() + "] not exist", null);
    }
}

First, the Broker's address is looked up by brokerName:

public FindBrokerResult findBrokerAddressInAdmin(final String brokerName) {
    String brokerAddr = null;
    boolean slave = false;
    boolean found = false;

    HashMap<Long/* brokerId */, String/* address */> map = this.brokerAddrTable.get(brokerName);
    if (map != null && !map.isEmpty()) {
        for (Map.Entry<Long, String> entry : map.entrySet()) {
            Long id = entry.getKey();
            brokerAddr = entry.getValue();
            if (brokerAddr != null) {
                found = true;
                if (MixAll.MASTER_ID == id) {
                    slave = false;
                } else {
                    slave = true;
                }
                break;
            }
        } // end of for
    }

    if (found) {
        return new FindBrokerResult(brokerAddr, slave, findBrokerVersion(brokerName, brokerAddr));
    }

    return null;
}

The Broker routing information in brokerAddrTable is kept up to date by scheduled task ② (updating topic route information). As soon as one address entry for the Broker is found in brokerAddrTable, it is wrapped into a FindBrokerResult and returned.

If nothing is found, updateTopicRouteInfoFromNameServer is executed, i.e. the scheduled task's update is triggered once immediately, and then findBrokerAddressInAdmin is called again.

Once an address is found, an UpdateConsumerOffsetRequestHeader is instantiated and filled in. Since oneway mode is used here, updateConsumerOffsetOneway is called to send the request to the Broker over Netty:

public void updateConsumerOffsetOneway(
    final String addr,
    final UpdateConsumerOffsetRequestHeader requestHeader,
    final long timeoutMillis
) throws RemotingConnectException, RemotingTooMuchRequestException, RemotingTimeoutException, RemotingSendRequestException,
    InterruptedException {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_CONSUMER_OFFSET, requestHeader);

    this.remotingClient.invokeOneway(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
}

This simply calls invokeOneway to send the request to the Broker one way (fire-and-forget).

【RocketMQ: Source Code Analysis of Producer Message Sending】

In the non-oneway case, the request is sent synchronously instead.
In this way, in clustering mode the consumption progress is managed by the Broker, and the load balancing described later builds on it.

⑤ periodically adjust the size of the consumer-side thread pool
This only applies to the PushConsumer and will be covered in a later post.

For the PullConsumer, what matters most is the start of the rebalanceService.

RebalanceService:

public void run() {
    log.info(this.getServiceName() + " service started");

    while (!this.isStopped()) {
        this.waitForRunning(waitInterval);
        this.mqClientFactory.doRebalance();
    }

    log.info(this.getServiceName() + " service end");
}

The waitForRunning here is similar to the one used for Broker flushing and master-slave replication: it blocks with a timeout (20 seconds by default) and can also be woken up early by a NOTIFY_CONSUMER_IDS_CHANGED request sent by the Broker; after that, doRebalance is called.

The doRebalance method of RebalanceImpl:

public void doRebalance(final boolean isOrder) {
    Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
    if (subTable != null) {
        for (final Map.Entry<String, SubscriptionData> entry : subTable.entrySet()) {
            final String topic = entry.getKey();
            try {
                this.rebalanceByTopic(topic, isOrder);
            } catch (Throwable e) {
                if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    log.warn("rebalanceByTopic Exception", e);
                }
            }
        }
    }

    this.truncateMessageQueueNotMyTopic();
}

Here the set of subscribed topics populated by copySubscription is retrieved; the routing information for these topics is what scheduled task ② keeps refreshing from the NameServer.

Each subscribed topic is then handled by rebalanceByTopic:

private void rebalanceByTopic(final String topic, final boolean isOrder) {
    switch (messageModel) {
        case BROADCASTING: {
            Set<MessageQueue> mqSet = this.topicSubscribeInfoTable.get(topic);
            if (mqSet != null) {
                boolean changed = this.updateProcessQueueTableInRebalance(topic, mqSet, isOrder);
                if (changed) {
                    this.messageQueueChanged(topic, mqSet, mqSet);
                    log.info("messageQueueChanged {} {} {} {}",
                        consumerGroup,
                        topic,
                        mqSet,
                        mqSet);
                }
            } else {
                log.warn("doRebalance, {}, but the topic[{}] not exist.", consumerGroup, topic);
            }
            break;
        }
        case CLUSTERING: {
            Set<MessageQueue> mqSet = this.topicSubscribeInfoTable.get(topic);
            List<String> cidAll = this.mQClientFactory.findConsumerIdList(topic, consumerGroup);
            if (null == mqSet) {
                if (!topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    log.warn("doRebalance, {}, but the topic[{}] not exist.", consumerGroup, topic);
                }
            }

            if (null == cidAll) {
                log.warn("doRebalance, {} {}, get consumer id list failed", consumerGroup, topic);
            }

            if (mqSet != null && cidAll != null) {
                List<MessageQueue> mqAll = new ArrayList<MessageQueue>();
                mqAll.addAll(mqSet);

                Collections.sort(mqAll);
                Collections.sort(cidAll);

                AllocateMessageQueueStrategy strategy = this.allocateMessageQueueStrategy;

                List<MessageQueue> allocateResult = null;
                try {
                    allocateResult = strategy.allocate(
                        this.consumerGroup,
                        this.mQClientFactory.getClientId(),
                        mqAll,
                        cidAll);
                } catch (Throwable e) {
                    log.error("AllocateMessageQueueStrategy.allocate Exception. allocateMessageQueueStrategyName={}", strategy.getName(),
                        e);
                    return;
                }

                Set<MessageQueue> allocateResultSet = new HashSet<MessageQueue>();
                if (allocateResult != null) {
                    allocateResultSet.addAll(allocateResult);
                }

                boolean changed = this.updateProcessQueueTableInRebalance(topic, allocateResultSet, isOrder);
                if (changed) {
                    log.info(
                        "rebalanced result changed. allocateMessageQueueStrategyName={}, group={}, topic={}, clientId={}, mqAllSize={}, cidAllSize={}, rebalanceResultSize={}, rebalanceResultSet={}",
                        strategy.getName(), consumerGroup, topic, this.mQClientFactory.getClientId(), mqSet.size(), cidAll.size(),
                        allocateResultSet.size(), allocateResultSet);
                    this.messageQueueChanged(topic, mqSet, allocateResultSet);
                }
            }
            break;
        }
        default:
            break;
    }
}

Different handling is applied depending on whether the model is broadcasting or clustering.

Broadcasting mode:
First, the set of all message queues for the topic is obtained.

These queues are then processed by updateProcessQueueTableInRebalance:

private boolean updateProcessQueueTableInRebalance(final String topic, final Set<MessageQueue> mqSet,
    final boolean isOrder) {
    boolean changed = false;

    Iterator<Entry<MessageQueue, ProcessQueue>> it = this.processQueueTable.entrySet().iterator();
    while (it.hasNext()) {
        Entry<MessageQueue, ProcessQueue> next = it.next();
        MessageQueue mq = next.getKey();
        ProcessQueue pq = next.getValue();

        if (mq.getTopic().equals(topic)) {
            if (!mqSet.contains(mq)) {
                pq.setDropped(true);
                if (this.removeUnnecessaryMessageQueue(mq, pq)) {
                    it.remove();
                    changed = true;
                    log.info("doRebalance, {}, remove unnecessary mq, {}", consumerGroup, mq);
                }
            } else if (pq.isPullExpired()) {
                switch (this.consumeType()) {
                    case CONSUME_ACTIVELY:
                        break;
                    case CONSUME_PASSIVELY:
                        pq.setDropped(true);
                        if (this.removeUnnecessaryMessageQueue(mq, pq)) {
                            it.remove();
                            changed = true;
                            log.error("[BUG]doRebalance, {}, remove unnecessary mq, {}, because pull is pause, so try to fixed it",
                                consumerGroup, mq);
                        }
                        break;
                    default:
                        break;
                }
            }
        }
    }

    List<PullRequest> pullRequestList = new ArrayList<PullRequest>();
    for (MessageQueue mq : mqSet) {
        if (!this.processQueueTable.containsKey(mq)) {
            if (isOrder && !this.lock(mq)) {
                log.warn("doRebalance, {}, add a new mq failed, {}, because lock failed", consumerGroup, mq);
                continue;
            }

            this.removeDirtyOffset(mq);
            ProcessQueue pq = new ProcessQueue();
            long nextOffset = this.computePullFromWhere(mq);
            if (nextOffset >= 0) {
                ProcessQueue pre = this.processQueueTable.putIfAbsent(mq, pq);
                if (pre != null) {
                    log.info("doRebalance, {}, mq already exists, {}", consumerGroup, mq);
                } else {
                    log.info("doRebalance, {}, add a new mq, {}", consumerGroup, mq);
                    PullRequest pullRequest = new PullRequest();
                    pullRequest.setConsumerGroup(consumerGroup);
                    pullRequest.setNextOffset(nextOffset);
                    pullRequest.setMessageQueue(mq);
                    pullRequest.setProcessQueue(pq);
                    pullRequestList.add(pullRequest);
                    changed = true;
                }
            } else {
                log.warn("doRebalance, {}, add new mq failed, {}", consumerGroup, mq);
            }
        }
    }

    this.dispatchPullRequest(pullRequestList);

    return changed;
}

If the set of message queues has changed, the while loop first removes the now-useless entries from the process-queue table.
The for loop then adds new entries: for each newly assigned MessageQueue a ProcessQueue is created and put into processQueueTable as its processing record. computePullFromWhere, which for the PullConsumer simply returns 0, supplies nextOffset, the position from which to pull next; it is set on the PullRequest built here together with the queue and its ProcessQueue.

The dispatchPullRequest call at the end only matters for the PushConsumer; for the PullConsumer it does nothing.

Back in rebalanceByTopic, if anything changed, messageQueueChanged is called:

public void messageQueueChanged(String topic, Set<MessageQueue> mqAll, Set<MessageQueue> mqDivided) {
    MessageQueueListener messageQueueListener = this.defaultMQPullConsumerImpl.getDefaultMQPullConsumer().getMessageQueueListener();
    if (messageQueueListener != null) {
        try {
            messageQueueListener.messageQueueChanged(topic, mqAll, mqDivided);
        } catch (Throwable e) {
            log.error("messageQueueChanged exception", e);
        }
    }
}

This simply invokes the messageQueueChanged callback on the MessageQueueListener, if one has been registered.
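
For a PullConsumer, this listener is the hook through which the application learns which queues it currently owns. A minimal sketch of registering one on the consumer instance from the earlier sketch (topic name and the printout are assumptions for illustration; the relevant client classes are assumed to be imported):

// Hypothetical listener registration; registerMessageQueueListener also adds
// the topic to registerTopics, so it is picked up by copySubscription at start().
consumer.registerMessageQueueListener("TopicTest", new MessageQueueListener() {
    @Override
    public void messageQueueChanged(String topic, Set<MessageQueue> mqAll, Set<MessageQueue> mqDivided) {
        // mqDivided is the set of queues allocated to this consumer after rebalancing;
        // a pull-style application would typically start/stop pulling based on it.
        System.out.printf("queues for %s changed: now owning %d of %d%n",
            topic, mqDivided.size(), mqAll.size());
    }
});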

Clustering mode:
Again, the set of message queues for the topic is obtained first.
Since this is the clustering model, each consumer gets a different share of the messages, so findConsumerIdList is called to get the list of consumer IDs in the group:

public List<String> findConsumerIdList(final String topic, final String group) {
    String brokerAddr = this.findBrokerAddrByTopic(topic);
    if (null == brokerAddr) {
        this.updateTopicRouteInfoFromNameServer(topic);
        brokerAddr = this.findBrokerAddrByTopic(topic);
    }

    if (null != brokerAddr) {
        try {
            return this.mQClientAPIImpl.getConsumerIdListByGroup(brokerAddr, group, 3000);
        } catch (Exception e) {
            log.warn("getConsumerIdListByGroup exception, " + brokerAddr + " " + group, e);
        }
    }

    return null;
}

findBrokerAddrByTopic picks the address of one Broker in the cluster serving the topic (this routing data is maintained by scheduled task ② via the NameServer); if a master exists it is chosen, otherwise a slave is picked at random.

If nothing is found, the route is refreshed from the NameServer and the lookup is retried.

Once a Broker address is obtained, getConsumerIdListByGroup sends a request to that Broker:

public List<String> getConsumerIdListByGroup(
    final String addr,
    final String consumerGroup,
    final long timeoutMillis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException,
    MQBrokerException, InterruptedException {
    GetConsumerListByGroupRequestHeader requestHeader = new GetConsumerListByGroupRequestHeader();
    requestHeader.setConsumerGroup(consumerGroup);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_LIST_BY_GROUP, requestHeader);

    RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
        request, timeoutMillis);
    assert response != null;
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            if (response.getBody() != null) {
                GetConsumerListByGroupResponseBody body =
                    GetConsumerListByGroupResponseBody.decode(response.getBody(), GetConsumerListByGroupResponseBody.class);
                return body.getConsumerIdList();
            }
        }
        default:
            break;
    }

    throw new MQBrokerException(response.getCode(), response.getRemark());
}

So a GET_CONSUMER_LIST_BY_GROUP request is sent to the Broker synchronously; when the response arrives, the consumer IDs carried in its body are returned as a List.

Back in rebalanceByTopic, with the consumer ID list in hand,
the queues are allocated according to the configured strategy; the default is AllocateMessageQueueAveragely,
whose allocate method performs the distribution:

public List<MessageQueue> allocate(String consumerGroup, String currentCID, List<MessageQueue> mqAll,
    List<String> cidAll) {
    if (currentCID == null || currentCID.length() < 1) {
        throw new IllegalArgumentException("currentCID is empty");
    }
    if (mqAll == null || mqAll.isEmpty()) {
        throw new IllegalArgumentException("mqAll is null or mqAll empty");
    }
    if (cidAll == null || cidAll.isEmpty()) {
        throw new IllegalArgumentException("cidAll is null or cidAll empty");
    }

    List<MessageQueue> result = new ArrayList<MessageQueue>();
    if (!cidAll.contains(currentCID)) {
        log.info("[BUG] ConsumerGroup: {} The consumerId: {} not in cidAll: {}",
            consumerGroup,
            currentCID,
            cidAll);
        return result;
    }

    int index = cidAll.indexOf(currentCID);
    int mod = mqAll.size() % cidAll.size();
    int averageSize =
        mqAll.size() <= cidAll.size() ? 1 : (mod > 0 && index < mod ? mqAll.size() / cidAll.size()
            + 1 : mqAll.size() / cidAll.size());
    int startIndex = (mod > 0 && index < mod) ? index * averageSize : index * averageSize + mod;
    int range = Math.min(averageSize, mqAll.size() - startIndex);
    for (int i = 0; i < range; i++) {
        result.add(mqAll.get((startIndex + i) % mqAll.size()));
    }
    return result;
}

(This client ID was introduced in the Producer startup analysis; it is built in MQClientManager's getAndCreateMQClientInstance method and is unique per client.)

Since this is clustering mode, the current Consumer should of course be one of the group members, so allocate first checks that currentCID is contained in the list.

Next, the queues are distributed according to the number of consumers and the number of message queues, achieving load balancing on the consumer side.
The default here is even (average) allocation: from the number of queues and the number of consumers it computes which slice of the queues the current consumer should consume.
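
For instance (an illustrative sketch; topic, broker, group, and consumer IDs are made up): with 8 queues and 3 consumers, the averaging strategy gives queues 0-2 to the first consumer, 3-5 to the second, and 6-7 to the third. This can be checked directly against the class above:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.rocketmq.client.consumer.rebalance.AllocateMessageQueueAveragely;
import org.apache.rocketmq.common.message.MessageQueue;

public class AllocateDemo {
    public static void main(String[] args) {
        // 8 queues of one topic on a single (made-up) broker.
        List<MessageQueue> mqAll = new ArrayList<MessageQueue>();
        for (int i = 0; i < 8; i++) {
            mqAll.add(new MessageQueue("TopicTest", "broker-a", i));
        }
        // 3 consumer IDs in the group (normally ip@instanceName, here fabricated).
        List<String> cidAll = Arrays.asList("cid-0", "cid-1", "cid-2");

        AllocateMessageQueueAveragely strategy = new AllocateMessageQueueAveragely();
        for (String cid : cidAll) {
            // Each client runs this locally with its own ID and gets its slice:
            // cid-0 -> queues 0,1,2; cid-1 -> 3,4,5; cid-2 -> 6,7.
            List<MessageQueue> mine = strategy.allocate("example_group", cid, mqAll, cidAll);
            System.out.println(cid + " -> " + mine);
        }
    }
}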

Besides this, RocketMQ provides several other allocation strategies, which can be chosen as needed.

Back in rebalanceByTopic, after the allocation is done,
updateProcessQueueTableInRebalance is called to update the message-queue and process-queue tables; if anything changed, messageQueueChanged invokes the callback to notify the application of the change in message queues.

This completes the startup of the PullConsumer.