commit 6680585975, parent 1baa36026c, 2026-02-10 23:08:39 +08:00
172 changed files with 52867 additions and 892 deletions

@@ -0,0 +1,42 @@
# Copyright Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
# TODO: Remove this policy once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
from abc import abstractmethod
class DynamicConfig:
placement_policy = None
max_transferred_expert_per_layer = 100 # Maximum number of experts that can be migrated per layer on a single host
ep_worldsize = 64 # Total number of dies across the entire cluster where experts are distributed
num_die_per_host = 8 # Number of dies on each host machine
class EplbPolicy:
def __init__(self, config: DynamicConfig):
self.config = config
@abstractmethod
def rebalance_experts(self, current_expert_table, expert_workload):
"""
Pass in the weights and return expert replication and placement under relevant constraints.
INPUT:
current_expert_table: [layerId, rankId, expert_num_i]
expert_workload = expert_table[layer0][rankId][expert_num_i]
RETURNED: (res, expert_table)
res:
1 -- table_changed
0 -- not_changed
expert_table: [layerId, rankId, expert_num_i]
expert_num_i --- [0, MaxExpertPerRank]
expertID = expert_table[layer0][rankId][expert_num_i]
array_values:
[0, 1, 2, 3, 248]
[4, 5, 6, 7, 254]
[8, 9, 10, 11, 71]
...
[252, 253, 254, 255, 0]
"""
pass
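# Illustrative sketch (toy example, names assumed): a concrete subclass only needs to honour the
# (res, expert_table) contract documented above, e.g. a policy that never changes the table:
#   class KeepCurrentPlacement(EplbPolicy):
#       def rebalance_experts(self, current_expert_table, expert_workload):
#           return 0, current_expert_table  # 0 -> not_changed, table handed back as-is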

@@ -0,0 +1,389 @@
# Copyright Huawei Technologies Co., Ltd. 2024-2025. All rights reserved.
# TODO: Remove this policy once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
from collections import defaultdict
from typing import cast
import numpy as np
from .policy_abstract import DynamicConfig, EplbPolicy
class DynamicTable:
# workload_table:
# 3D matrix: [layer, gpus, experts_per_gpu_per_layer] -> value: workload (heat) at the corresponding position
# Size: number of layers * number of GPUs * number of experts per GPU per layer
# The element at (i, j, k) represents the workload (heat) of the k-th expert on the j-th GPU in the i-th layer
# For experts that are not available or collected, the value is set to -1
workload_table = None
# placement_table:
# 3D matrix: [layer, gpus, experts_per_gpu_per_layer] -> value: physical expert ID at the corresponding position
# Size: number of layers * number of GPUs * number of experts per GPU per layer
# The element at (i, j, k) represents the physical expert ID of the k-th expert on the j-th GPU in the i-th layer
# For experts that are not available or collected, the value is set to -1
placement_table = None
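# Illustrative shapes (toy sizes assumed): for 2 layers, 4 GPUs and 3 experts per GPU per layer,
# both tables are (2, 4, 3) arrays, e.g.
#   workload_table  = np.full((2, 4, 3), -1)   # per-slot heat, -1 = not collected
#   placement_table = np.full((2, 4, 3), -1)   # per-slot physical expert ID, -1 = not available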
class DynamicEplb(EplbPolicy):
def __init__(self, config: DynamicConfig):
super().__init__(config)
@staticmethod
def add_redundant(current_expert_table, expert_workload,
num_original_expert):
layer_num, npu_num, experts_per_npu = expert_workload.shape
workload_new = np.zeros((layer_num, num_original_expert))
for layer_idx in range(layer_num):
workload_dict: dict[int, int] = defaultdict(int)
placement_layer = current_expert_table[layer_idx].copy()
workload_layer = expert_workload[layer_idx].copy()
for npu_idx in range(npu_num):
for expert_idx in range(experts_per_npu):
workload_dict[placement_layer[npu_idx][
expert_idx]] += workload_layer[npu_idx][expert_idx]
for expert_idx in range(num_original_expert):
workload_new[layer_idx][expert_idx] = workload_dict[expert_idx]
return workload_new
# Split hot (high-load) experts into redundant experts
@staticmethod
def original_compute_balanced_pack_redundancy(origin_weights, card_num,
num_redundancy_expert):
# Step 1: Sort the items by weight in descending order (we are sorting by weight now)
# Sort based on the second element (the second value of each tuple)
route_expert_num = len(origin_weights)
route_expert_redundancy: list[list[int]] = [
[] for _ in range(route_expert_num)
]
for i in range(num_redundancy_expert):
sorted_indices = np.argsort([t[1] for t in origin_weights],
kind='stable')[::-1]
weights = [origin_weights[idx] for idx in sorted_indices]
tmp_raw_weight = weights[0][1] * (
len(route_expert_redundancy[weights[0][0]]) + 1)
route_expert_redundancy[weights[0][0]].append(route_expert_num + i)
avg_weight = tmp_raw_weight / (
len(route_expert_redundancy[weights[0][0]]) + 1)
weights[0] = (weights[0][0], avg_weight)
origin_weights = weights
# Step 2: Calculate the number of items per box
expert_num = route_expert_num + num_redundancy_expert
items_per_box = expert_num // card_num # Number of items per box
remaining_items = expert_num % card_num # Leftover items after even division
# Step 3: Initialize card_num boxes with empty lists to store item IDs
boxes: list[list[int]] = [[] for _ in range(card_num)]
boxes_weights: list[list[float]] = [[] for _ in range(card_num)]
box_weights = [0] * card_num # To store the total weight of each box
box_counts = [0] * card_num # To store the number of items in each box
index = 0
for i in range(route_expert_num):
redundancy_num = len(route_expert_redundancy[i])
for _ in range(redundancy_num):
cur_weight = 0
for item, weight in origin_weights:
if item == i:
cur_weight = weight
boxes[index].append(i)
boxes_weights[index].append(cur_weight)
box_weights[index] += cur_weight
box_counts[index] += 1
index += 1
sorted_indices = np.argsort([t[1] for t in origin_weights],
kind='stable')[::-1]
origin_weights = [origin_weights[idx] for idx in sorted_indices]
# Step 4: Distribute items into boxes based on weight
for item_id, weight in origin_weights:
# Find the box with the least items but not full
min_box_index = -1
for i in range(card_num):
if item_id in boxes[i]:
continue
# Only choose boxes that still have space (box_counts[i] < items_per_box)
if box_counts[i] < items_per_box or (box_counts[i]
== items_per_box
and remaining_items > 0):
if min_box_index == -1 or box_weights[i] < box_weights[
min_box_index]:
min_box_index = i
# Place the item (id) into the selected box
boxes[min_box_index].append(item_id)
boxes_weights[min_box_index].append(weight)
box_weights[min_box_index] += weight
box_counts[min_box_index] += 1
# If there's an imbalance in the remaining items, reduce the "remaining_items" counter
if box_counts[min_box_index] == (items_per_box +
1) and remaining_items > 0:
remaining_items -= 1
# Step 5: Output each box's contents and total weight
result = []
for i in range(card_num):
result.append({
"box_index": i + 1,
"items": boxes[i], # List of item IDs in the box
"weight": boxes_weights[i],
"total_weight": box_weights[i], # Total weight in this box
"item_count": box_counts[i] # Number of items in the box
})
return result, boxes
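# Illustrative call of the method above (toy sizes, weights assumed): with
#   weights = [(0, 30.0), (1, 10.0), (2, 8.0), (3, 2.0)], card_num=2, num_redundancy_expert=1
# the hottest expert (id 0) receives the single redundant copy, so `boxes` lists expert 0 on
# both cards and each copy carries half of its original 30.0 workload.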
# Split hot (high-load) experts into redundant experts
@staticmethod
def compute_balanced_pack_redundancy(origin_weights, card_num,
num_redundancy_expert):
route_expert_num = len(origin_weights)
route_expert_redundancy: list[list[int]] = [
[] for _ in range(route_expert_num)
]
for i in range(num_redundancy_expert):
sorted_indices = np.argsort([t[1] for t in origin_weights],
kind='stable')[::-1]
weights = [origin_weights[idx] for idx in sorted_indices]
tmp_raw_weight = weights[0][1] * (
len(route_expert_redundancy[weights[0][0]]) + 1)
route_expert_redundancy[weights[0][0]].append(route_expert_num + i)
avg_weight = tmp_raw_weight / (
len(route_expert_redundancy[weights[0][0]]) + 1)
weights[0] = (weights[0][0], avg_weight)
origin_weights = weights
expert_num = route_expert_num + num_redundancy_expert
if card_num == 0:
raise RuntimeError("card_num can not be 0.")
items_per_box = expert_num // card_num
remaining_items = expert_num % card_num
boxes: list[list[int]] = [[] for _ in range(card_num)]
boxes_weights: list[list[float]] = [[] for _ in range(card_num)]
box_weights = [0] * card_num
box_counts = [0] * card_num
all_weights = np.zeros((expert_num, ), dtype='object')
all_weights[:route_expert_num] = origin_weights
index = route_expert_num
for i in range(route_expert_num):
redundancy_num = len(route_expert_redundancy[i])
for _ in range(redundancy_num):
for item, weight in origin_weights:
if item == i:
all_weights[index] = (item, weight)
index += 1
sorted_indices = np.argsort([t[1] for t in all_weights],
kind='stable')[::-1]
all_weights = [all_weights[idx] for idx in sorted_indices]
for item_id, weight in all_weights:
min_box_index = -1
for i in range(card_num):
if box_counts[i] < items_per_box or (box_counts[i]
== items_per_box
and remaining_items > 0):
if min_box_index == -1 or box_weights[i] < box_weights[
min_box_index]:
if item_id not in boxes[i]:
min_box_index = i
boxes[min_box_index].append(item_id)
boxes_weights[min_box_index].append(weight)
box_weights[min_box_index] += weight
box_counts[min_box_index] += 1
if box_counts[min_box_index] == (items_per_box +
1) and remaining_items > 0:
remaining_items -= 1
result = []
for i in range(card_num):
result.append({
"box_index": i + 1,
"items": boxes[i],
"weight": boxes_weights[i],
"total_weight": box_weights[i],
"item_count": box_counts[i]
})
return result, boxes
# Scheme without redundant experts
@staticmethod
def compute_balanced_pack(origin_weights, card_num):
sorted_indices = np.argsort([t[1] for t in origin_weights])[::-1]
weights = origin_weights[sorted_indices]
expert_num = len(weights)
if card_num == 0:
raise RuntimeError("card_num can not be 0.")
items_per_box = expert_num // card_num
remaining_items = expert_num % card_num
boxes: list[list[int]] = [[] for _ in range(card_num)]
boxes_weights: list[list[float]] = [[] for _ in range(card_num)]
box_weights = [0] * card_num
box_counts = [0] * card_num
for item_id, weight in weights:
min_box_index = -1
for i in range(card_num):
if box_counts[i] < items_per_box or (box_counts[i]
== items_per_box
and remaining_items > 0):
if min_box_index == -1 or box_weights[i] < box_weights[
min_box_index]:
min_box_index = i
boxes[min_box_index].append(item_id)
boxes_weights[min_box_index].append(weight)
box_weights[min_box_index] += weight
box_counts[min_box_index] += 1
if box_counts[min_box_index] == (items_per_box +
1) and remaining_items > 0:
remaining_items -= 1
result = []
for i in range(card_num):
result.append({
"box_index": i + 1,
"items": boxes[i],
"weight": boxes_weights[i],
"total_weight": box_weights[i],
"item_count": box_counts[i]
})
return result, boxes
@staticmethod
def get_redundant_num(npu_num, counts):
redundant_num_each_npu: int = int(np.sum(counts - 1))
return redundant_num_each_npu
@staticmethod
def calculate_max_heat_per_layer(workload_table, layer_num):
max_heat_per_layer: list[float] = []
for layer_idx in range(layer_num):
npu_heats_now = np.sum(workload_table[layer_idx], axis=1)
max_heat_per_layer.append(np.max(npu_heats_now))
return max_heat_per_layer
@staticmethod
def constraint_expert_local_exchange(current_expert_table,
global_deployment):
for layer_id in range(len(global_deployment)):
for card_id in range(len(global_deployment[layer_id])):
current_list = [
int(x) for x in current_expert_table[layer_id][card_id]
]
new_list = [
int(x) for x in global_deployment[layer_id][card_id]
]
num = len(new_list)
new_index = [-1] * num
new_result = [-1] * num
remaining_elements = []
for i in range(num):
flag = True
for j in range(num):
if new_list[i] == current_list[j] and new_index[
j] == -1:
new_index[j] = 0
new_result[j] = current_list[j]
flag = False
break
if flag:
remaining_elements.append(new_list[i])
index = 0
for k in range(num):
if new_result[k] == -1:
new_result[k] = remaining_elements[index]
index += 1
global_deployment[layer_id][card_id] = new_result
return global_deployment
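# Illustrative effect of constraint_expert_local_exchange (toy row, values assumed): if a card
# currently holds [4, 5, 6] and the new plan for that card is [6, 9, 4], experts 4 and 6 keep
# their original slots and the newcomer fills the freed one, giving [4, 9, 6]; experts already
# resident on a card are therefore not shuffled between its slots.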
def rebalance_experts(self, current_expert_table, expert_workload):
info = DynamicTable()
info.workload_table = np.array(expert_workload)
info.placement_table = np.array(current_expert_table)
assert info.workload_table is not None
layer_num, num_npus, experts_per_npu = info.workload_table.shape
assert info.placement_table is not None
row = cast(np.ndarray, info.placement_table[0])
expert_ids, counts = np.unique(row, return_counts=True)
num_redundancy_expert = self.get_redundant_num(num_npus, counts)
num_original_expert = len(expert_ids)
layer_workloads = self.add_redundant(info.placement_table,
info.workload_table,
num_original_expert)
max_heat_per_layer_before = self.calculate_max_heat_per_layer(
info.workload_table, layer_num)
npu_heat_all_origin = sum(max_heat_per_layer_before)
# Perform load balancing and deploy redundant experts
layer_num = layer_workloads.shape[0]
expert_num = layer_workloads.shape[1]
# Validate the expert count, that the number of cards is positive, and that the number of redundant experts does not exceed the number of cards
if num_original_expert != expert_num:
raise ValueError(
f"the number of original experts {num_original_expert} must be equal to expert_num {expert_num}"
)
if num_npus <= 0:
raise ValueError("the number of NPUs must be greater than 0")
if num_npus < num_redundancy_expert:
raise ValueError(
f"the number of NPUs {num_npus} must be greater than or equal to the number of redundant experts {num_redundancy_expert}"
)
# Number of experts deployed on each card includes one redundant expert
global_deployment: list[list[list[int]]] = [[[]
for _ in range(num_npus)]
for _ in range(layer_num)]
# Iterate to obtain the placement strategy for each layer, taking computational balance into account
max_heat_per_layer_after = np.zeros([layer_num])
for layer in range(layer_num):
# Get the expert IDs and their corresponding workloads for the current layer;
# workloads need to be normalized, and one redundant expert is added per card
weights = np.zeros((expert_num, ), dtype='object')
for expert_id, workload_weight in enumerate(
layer_workloads[layer]):
weights[expert_id] = (expert_id, workload_weight)
# Obtain the globally balanced placement strategy for each layer
result, layer_deployment = self.original_compute_balanced_pack_redundancy(
weights, num_npus, num_redundancy_expert)
global_deployment[layer] = layer_deployment
max_heat_per_layer_after[layer] = max(
result, key=lambda x: x['total_weight'])['total_weight']
new_global_deployment = self.constraint_expert_local_exchange(
current_expert_table, global_deployment)
# Obtain the priority of each layer
layer_changed_ratio = []
for layer_idx in range(layer_num):
layer_changed_ratio.append(max_heat_per_layer_after[layer_idx] /
max_heat_per_layer_before[layer_idx])
per_layer_priority = np.argsort(layer_changed_ratio)
npu_heat_all_after = sum(max_heat_per_layer_after)
change = 0
if npu_heat_all_after < 0.95 * npu_heat_all_origin:
change = 1
return change, per_layer_priority, np.array(
new_global_deployment).tolist()
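# Illustrative usage (toy sizes, tables assumed): both tables are nested lists or numpy arrays of
# shape (num_layers, num_npus, slots_per_npu); duplicate IDs in layer 0 define the redundant-expert
# budget.
#   policy = DynamicEplb(DynamicConfig())
#   placement = [[[0, 1, 2], [3, 0, 1]], [[0, 1, 2], [3, 0, 1]]]
#   workload  = [[[9, 4, 3], [2, 1, 1]], [[5, 5, 5], [5, 5, 5]]]
#   changed, layer_priority, new_placement = policy.rebalance_experts(placement, workload)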

@@ -0,0 +1,771 @@
# Copyright Huawei Technologies Co., Ltd. 2024-2025. All rights reserved.
# TODO: Remove this policy once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
from abc import abstractmethod
from collections import defaultdict
import numpy as np
class DynamicConfig:
placement_policy = None
max_transferred_expert_per_layer = 100 # Maximum number of experts that can be migrated per layer on a single host
ep_worldsize = 64 # Total number of dies across the entire cluster where experts are distributed
num_die_per_host = 8 # Number of dies on each host machine
class EplbPolicy:
def __init__(self, config: DynamicConfig):
self.config = config
@abstractmethod
def rebalance_experts(self, current_expert_table, expert_workload):
"""
Pass in the weights and return expert replication and placement under relevant constraints.
INPUT:
current_expert_table: [layerId, rankId, expert_num_i]
expert_workload = expert_table[layer0][rankId][expert_num_i]
RETURNED: (res, expert_table)
res:
1 -- table_changed
0 -- not_changed
expert_table: [layerId, rankId, expert_num_i]
expert_num_i --- [0, MaxExpertPerRank]
expertID = expert_table[layer0][rankId][expert_num_i]
array_values:
[0, 1, 2, 3, 248]
[4, 5, 6, 7, 254]
[8, 9, 10, 11, 71]
...
[252, 253, 254, 255, 0]
"""
pass
class DynamicTable:
# workload_table:
# 3D matrix: [layer, gpus, experts_per_gpu_per_layer] -> value: workload (heat) at the corresponding position
# Size: number of layers * number of GPUs * number of experts per GPU per layer
# The element at (i, j, k) represents the workload (heat) of the k-th expert on the j-th GPU in the i-th layer
# For experts that are not available or collected, the value is set to -1
workload_table = None
# placement_table:
# 3D matrix: [layer, gpus, experts_per_gpu_per_layer] -> value: physical expert ID at the corresponding position
# Size: number of layers * number of GPUs * number of experts per GPU per layer
# The element at (i, j, k) represents the physical expert ID of the k-th expert on the j-th GPU in the i-th layer
# For experts that are not available or collected, the value is set to -1
placement_table = None
class DynamicEplbV2(EplbPolicy):
def __init__(self, config: DynamicConfig):
super().__init__(config)
@staticmethod
def safe_divide(a, b):
if b == 0:
print("Division by zero is not allowed")
return 0
return a / b
@staticmethod
def safe_exact_divide(a, b):
if b == 0:
print("Division by zero is not allowed")
return 0
return a // b
@staticmethod
def safe_mod(a, b):
if b == 0:
print("Division by zero is not allowed")
return 0
return a % b
@staticmethod
def add_redundant(current_expert_table, expert_workload,
num_original_expert):
layer_num, npu_num, experts_per_npu = expert_workload.shape
workload_new = np.zeros((layer_num, num_original_expert))
for layer_idx in range(layer_num):
workload_dict: dict[int, int] = defaultdict(int)
placement_layer = current_expert_table[layer_idx].copy()
workload_layer = expert_workload[layer_idx].copy()
for npu_idx in range(npu_num):
for expert_idx in range(experts_per_npu):
workload_dict[placement_layer[npu_idx][
expert_idx]] += workload_layer[npu_idx][expert_idx]
for expert_idx in range(num_original_expert):
workload_new[layer_idx][expert_idx] = workload_dict[expert_idx]
return workload_new
@staticmethod
def get_redundant_num(npu_num, counts):
redundant_num_each_npu: int = int(np.sum(counts - 1))
return redundant_num_each_npu
@staticmethod
def calculate_max_heat_per_layer(workload_table, layer_num):
max_heat_per_layer: list[float] = []
for layer_idx in range(layer_num):
npu_heats_now = np.sum(workload_table[layer_idx], axis=1)
max_heat_per_layer.append(np.max(npu_heats_now))
return max_heat_per_layer
def calculate_initial_imbalance(self, global_deployment,
new_layer_workloads):
device_num = global_deployment.shape[1]
layer_imbalance = []
expert_num = np.zeros_like(new_layer_workloads)
for layer_id, layer in enumerate(global_deployment):
for device in layer:
for expert_id in device:
expert_num[layer_id][expert_id] += 1
for layer_id, layer in enumerate(global_deployment):
cur_layer_max_workload = 0
total_workload = 0
for box in layer:
box_workload = 0
for expert_id in box:
update_workload = self.safe_divide(
new_layer_workloads[layer_id][expert_id],
expert_num[layer_id][expert_id])
box_workload += update_workload
total_workload += update_workload
if cur_layer_max_workload < box_workload:
cur_layer_max_workload = box_workload
cur_layer_imbalance = self.safe_divide(
cur_layer_max_workload,
(self.safe_divide(total_workload, device_num)))
layer_imbalance.append(cur_layer_imbalance)
return layer_imbalance
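# Illustrative reading of calculate_initial_imbalance: each expert's workload is split evenly
# across its replicas and the returned value is max device load / mean device load per layer;
# 1.0 means perfectly balanced, and rebalance_experts() below skips layers already under 1.01.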
def compute_redundant_assignments(self, base_experts,
num_redundant_experts, num_experts):
redundant_assignments: list[list[int]] = [[]
for _ in range(num_experts)]
current_weights = base_experts.copy()
for i in range(num_redundant_experts):
sorted_indices = np.argsort([w for _, w in current_weights],
kind='stable')[::-1]
sorted_weights = [current_weights[i] for i in sorted_indices]
target_expert = sorted_weights[0]
expert_id, original_weight = target_expert
current_redundancy = len(redundant_assignments[expert_id])
new_avg_weight = self.safe_divide(
original_weight * (current_redundancy + 1),
(current_redundancy + 2))
redundant_assignments[expert_id].append(num_experts + i)
current_weights[sorted_indices[0]] = (expert_id, new_avg_weight)
sorted_indices = np.argsort([w for _, w in current_weights],
kind='stable')[::-1]
sorted_weights = [current_weights[i] for i in sorted_indices]
return redundant_assignments, sorted_weights
def repeat_compute_redundant_assignments(self, layer_workloads, rendun_pos,
num_experts, num_exist_expert,
device_assignments, device_counts,
expert_from_device,
com_between_devices):
current_weights = np.zeros((num_experts, ), dtype='object')
for expert_id, workload_weight in enumerate(layer_workloads):
current_weights[expert_id] = (expert_id, workload_weight)
devices_with_slots = []
for device_id, device_rendun_pos in enumerate(rendun_pos):
if len(device_rendun_pos) != 0:
devices_with_slots.append(device_id)
while devices_with_slots:
sorted_indices = np.argsort([w for _, w in current_weights],
kind='stable')[::-1]
sorted_weights = [current_weights[i] for i in sorted_indices]
for index, target_weight in enumerate(sorted_weights):
expert_id, original_weight = target_weight
if original_weight == -1:
print("Error: Redundant expert failure re-occurred")
redundancy_successful = True
break
redundancy_successful = False
for cur_device_id in devices_with_slots:
if expert_id not in device_assignments[cur_device_id]:
pos = rendun_pos[cur_device_id].pop()
if len(rendun_pos[cur_device_id]) == 0:
devices_with_slots = [
device_id for device_id in devices_with_slots
if device_id != cur_device_id
]
device_assignments[cur_device_id][pos] = expert_id
device_counts[cur_device_id] += 1
communication_box_index = expert_from_device[expert_id]
com_between_devices[cur_device_id][
communication_box_index] = expert_id
new_weight = self.safe_divide(
(original_weight * num_exist_expert[expert_id]),
(num_exist_expert[expert_id] + 1))
sorted_weights[index] = (expert_id, new_weight)
num_exist_expert[expert_id] += 1
redundancy_successful = True
break
if redundancy_successful:
break
sorted_indices = np.argsort([id for id, _ in sorted_weights],
kind='stable')
sorted_weights = [sorted_weights[i][1] for i in sorted_indices]
return sorted_weights, device_assignments, device_counts, com_between_devices
@staticmethod
def prepare_expert_list(base_experts, redundant_assignments,
num_redundant_experts):
redundant_expert_list = np.empty(num_redundant_experts, dtype=object)
index = 0
num_experts = len(redundant_assignments)
for expert_id in range(num_experts):
for _ in redundant_assignments[expert_id]:
redundant_expert_list[index] = (expert_id,
next(w
for eid, w in base_experts
if eid == expert_id))
index += 1
sorted_indices = np.argsort([w for _, w in redundant_expert_list],
kind='stable')[::-1]
return [redundant_expert_list[i] for i in sorted_indices]
@staticmethod
def non_redundant_expert_information(origin_deployment, updated_weights,
rendun_pos):
device_num = len(origin_deployment)
num_experts_per_device = origin_deployment.shape[1]
device_assignments = [[-1 for _ in range(num_experts_per_device)]
for _ in range(device_num)]
device_weights = [[0 for _ in range(num_experts_per_device)]
for _ in range(device_num)]
device_loads = [0] * device_num
device_counts = [0] * device_num
for device_id, device in enumerate(origin_deployment):
for index, expert_id in enumerate(device):
if index in rendun_pos[device_id]:
continue
device_assignments[device_id][index] = expert_id
cur_weight = next(
weight for expert_id_of_weight, weight in updated_weights
if expert_id_of_weight == expert_id)
device_weights[device_id][index] = cur_weight
device_loads[device_id] += cur_weight
device_counts[device_id] += 1
return device_assignments, device_weights, device_loads, device_counts
def recomputing_initial_weight(self, layer_workloads, device_assignments):
num_all_experts = [0] * len(layer_workloads)
for device in device_assignments:
for expert_id in device:
if expert_id != -1:
num_all_experts[expert_id] += 1
cur_layer_workload = []
for expert_id, weight in enumerate(layer_workloads):
if num_all_experts[expert_id] == 0:
cur_layer_workload.append(-1)
else:
cur_layer_workload.append(
self.safe_divide(weight, num_all_experts[expert_id]))
return cur_layer_workload, num_all_experts
def distribute_redun_experts(self, layer_workloads, device_assignments,
device_weights, device_loads, device_counts,
redundant_expert_list, expert_from_device,
num_experts, rendun_pos):
num_devices = len(device_assignments)
com_between_devices: list[dict[int,
int]] = [{} for _ in range(num_devices)]
for expert_id, weight in redundant_expert_list:
candidate = -1
for dev_id in range(num_devices):
if len(rendun_pos[dev_id]) == 0:
continue
if expert_id in device_assignments[dev_id]:
continue
if candidate == -1 or device_loads[dev_id] < device_loads[
candidate]:
candidate = dev_id
if candidate != -1:
pos = rendun_pos[candidate].pop()
device_assignments[candidate][pos] = expert_id
device_weights[candidate][pos] = weight
device_loads[candidate] += weight
device_counts[candidate] += 1
communication_box_index = expert_from_device[expert_id]
com_between_devices[candidate][
communication_box_index] = expert_id
if any(sublist for sublist in rendun_pos):
cur_layer_workload, num_exist_expert = self.recomputing_initial_weight(
layer_workloads, device_assignments)
update_workload, device_assignments, device_counts, com_between_devices = self.repeat_compute_redundant_assignments(
cur_layer_workload, rendun_pos, num_experts, num_exist_expert,
device_assignments, device_loads, expert_from_device,
com_between_devices)
device_loads = [0] * len(device_counts)
for device_id, device in enumerate(device_assignments):
for index, expert_id in enumerate(device):
device_weights[device_id][index] = update_workload[
expert_id]
device_loads[device_id] += update_workload[expert_id]
return device_assignments, device_weights, device_loads, device_counts, com_between_devices
def redundancy_again(self, layer_workloads, origin_weights,
origin_deployment, expert_from_device, num_node,
is_node_redundant, rendun_pos):
num_experts = len(origin_weights)
if is_node_redundant:
num_experts = num_experts * num_node
num_redundant_experts = 0
for rank_empty_pos in rendun_pos:
num_redundant_experts += len(rank_empty_pos)
redundant_assignments, updated_weights = self.compute_redundant_assignments(
origin_weights, num_redundant_experts, num_experts)
redundant_expert_list = self.prepare_expert_list(
updated_weights, redundant_assignments, num_redundant_experts)
device_assignments, device_weights, device_loads, device_counts = self.non_redundant_expert_information(
origin_deployment, updated_weights, rendun_pos)
device_assignments, device_weights, device_loads, device_counts, com_between_devices = self.distribute_redun_experts(
layer_workloads, device_assignments, device_weights, device_loads,
device_counts, redundant_expert_list, expert_from_device,
num_experts, rendun_pos)
return device_assignments, device_weights, device_loads, device_counts, com_between_devices
@staticmethod
def generate_allocation_report(device_assignments, device_weights,
device_loads, device_counts):
report = []
max_load = 0.0
for dev_id in range(len(device_assignments)):
current_load = device_loads[dev_id]
max_load = max(max_load, current_load)
report.append({
"device_id": dev_id + 1,
"assigned_experts": device_assignments[dev_id],
"expert_weights": device_weights[dev_id],
"total_load": current_load,
"expert_count": device_counts[dev_id]
})
return report, max_load
@staticmethod
def exchange_expert(cur_exchange_index, next_exchange_index, cur_device_id,
next_device_id, cur_layer_result, com_between_devices):
cur_device_deployment = cur_layer_result[cur_device_id][
'assigned_experts']
next_device_deployment = cur_layer_result[next_device_id][
'assigned_experts']
cur_device_weight = cur_layer_result[cur_device_id]['expert_weights']
next_device_weight = cur_layer_result[next_device_id]['expert_weights']
cur_expert_id = cur_device_deployment[cur_exchange_index]
next_expert_id = next_device_deployment[next_exchange_index]
cur_device_deployment[cur_exchange_index] = next_expert_id
next_device_deployment[next_exchange_index] = cur_expert_id
cur_expert_weight = cur_device_weight[cur_exchange_index]
next_expert_weight = next_device_weight[next_exchange_index]
cur_device_weight[cur_exchange_index] = next_expert_weight
next_device_weight[next_exchange_index] = cur_expert_weight
cur_layer_result[cur_device_id][
'total_load'] += next_expert_weight - cur_expert_weight
cur_layer_result[next_device_id][
'total_load'] += cur_expert_weight - next_expert_weight
com_between_devices[cur_device_id][next_device_id] = next_expert_id
com_between_devices[next_device_id][cur_device_id] = cur_expert_id
def redundant_expert_deployment(self, layer_workloads, original_deployment,
expert_from_device, node_num,
is_node_redundant, rendun_pos):
device_num, per_device_expert_num = original_deployment.shape
route_expert_num = layer_workloads.shape[0]
per_node_device_num = self.safe_exact_divide(device_num, node_num)
per_node_route_expert_num = per_node_device_num * (
per_device_expert_num - 1)
weights = np.zeros((route_expert_num, ), dtype='object')
for expert_id, workload_weight in enumerate(layer_workloads):
weights[expert_id] = (expert_id, workload_weight)
if is_node_redundant:
device_assignments = []
device_weights = []
device_loads = []
device_counts = []
com_between_devices = []
for node_id in range(node_num):
cur_node_weights = weights[node_id *
per_node_route_expert_num:(node_id +
1) *
per_node_route_expert_num]
cur_original_deployment = original_deployment[
node_id * per_node_device_num:(node_id + 1) *
per_node_device_num]
cur_node_rendun_pos = rendun_pos[node_id *
per_node_device_num:(node_id +
1) *
per_node_device_num]
cur_device_assignments, cur_device_weights, cur_device_loads, cur_device_counts, cur_com_between_devices = self.redundancy_again(
layer_workloads, cur_node_weights, cur_original_deployment,
expert_from_device, node_num, is_node_redundant,
cur_node_rendun_pos)
device_assignments += cur_device_assignments
device_weights += cur_device_weights
device_loads += cur_device_loads
device_counts += cur_device_counts
com_between_devices += cur_com_between_devices
else:
device_assignments, device_weights, device_loads, device_counts, com_between_devices = self.redundancy_again(
layer_workloads, weights, original_deployment,
expert_from_device, node_num, is_node_redundant, rendun_pos)
report, max_load = self.generate_allocation_report(
device_assignments, device_weights, device_loads, device_counts)
return report, max_load, com_between_devices
@staticmethod
def two_device_exchange_experts(cur_device_result, exchange_device_result,
cur_exchanged_expert_id,
next_exchanged_expert_id, ave_workload,
increment, num_redundancy_expert):
cur_device_weight = cur_device_result['expert_weights']
next_device_weight = exchange_device_result['expert_weights']
cur_device_expert_id = cur_device_result['assigned_experts']
next_device_expert_id = exchange_device_result['assigned_experts']
cur_device_total_weight = cur_device_result['total_load']
next_device_total_weight = exchange_device_result['total_load']
max_weight = max(cur_device_total_weight, next_device_total_weight)
cur_exchange_index = -1
next_exchange_index = -1
for index, weight in enumerate(cur_device_weight):
for next_index, next_weight in enumerate(next_device_weight):
change_flag = True
if (cur_device_expert_id[index] in next_device_expert_id
or next_device_expert_id[next_index]
in cur_device_expert_id):
change_flag = False
if (cur_device_expert_id[index] not in cur_exchanged_expert_id
) and (next_device_expert_id[next_index]
not in next_exchanged_expert_id) and change_flag:
cur_total_weight_after_exchange = cur_device_total_weight - weight + next_weight
next_total_weight_after_exchange = next_device_total_weight - next_weight + weight
exchange_max_weight = max(
cur_total_weight_after_exchange,
next_total_weight_after_exchange)
if exchange_max_weight < max_weight and (
max_weight -
exchange_max_weight) >= (ave_workload * increment):
max_weight = exchange_max_weight
cur_exchange_index = index
next_exchange_index = next_index
return cur_exchange_index, next_exchange_index
def expert_exchange_between_devices(self,
ave_workload,
increment,
cur_layer_result,
com_between_devices,
num_redundancy_expert,
node_idx=0,
per_node_device_num=0,
is_node_redundant=False):
if is_node_redundant:
cur_devices_result = cur_layer_result[node_idx *
per_node_device_num:
(node_idx + 1) *
per_node_device_num]
else:
cur_devices_result = cur_layer_result
devices_total_weight = []
for device in cur_devices_result:
devices_total_weight.append(
(device['total_load'], device['device_id'] - 1))
exchange_frequency = 100
while exchange_frequency > 0:
exchange_frequency -= 1
devices_total_weight.sort(key=lambda x: x[0])
max_weight_device_id = devices_total_weight[-1][1]
exchange = False
for index in range(0, len(devices_total_weight) - 1):
min_weight_device_id = devices_total_weight[index][1]
if min_weight_device_id not in com_between_devices[
max_weight_device_id]:
cur_exchanged_expert_id = list(
com_between_devices[max_weight_device_id].values())
next_exchanged_expert_id = list(
com_between_devices[min_weight_device_id].values())
cur_exchange_index, next_exchange_index = self.two_device_exchange_experts(
cur_layer_result[max_weight_device_id],
cur_layer_result[min_weight_device_id],
cur_exchanged_expert_id, next_exchanged_expert_id,
ave_workload, increment, num_redundancy_expert)
if cur_exchange_index != -1:
self.exchange_expert(cur_exchange_index,
next_exchange_index,
max_weight_device_id,
min_weight_device_id,
cur_layer_result,
com_between_devices)
devices_total_weight[-1] = (
cur_layer_result[max_weight_device_id]
['total_load'], max_weight_device_id)
devices_total_weight[index] = (
cur_layer_result[min_weight_device_id]
['total_load'], min_weight_device_id)
exchange = True
break
if not exchange:
break
def exchange_experts(self, layer_result, layer_com_between_devices,
num_nodes, device_num, is_node_redundant,
ave_workload, increment, num_redundancy_expert,
org_deployment):
global_deployment = []
if is_node_redundant:
per_node_device_num = self.safe_exact_divide(device_num, num_nodes)
for node_idx in range(num_nodes):
self.expert_exchange_between_devices(
ave_workload, increment, layer_result,
layer_com_between_devices, num_redundancy_expert, node_idx,
per_node_device_num, is_node_redundant)
else:
self.expert_exchange_between_devices(ave_workload, increment,
layer_result,
layer_com_between_devices,
num_redundancy_expert)
max_workload = 0
for box in layer_result:
global_deployment.append(box['assigned_experts'])
if max_workload < box['total_load']:
max_workload = box['total_load']
global_deployment = np.array(global_deployment)
return global_deployment, max_workload
def count_elements(self, lst):
count = 0
for item in lst:
if isinstance(item, list):
count += self.count_elements(item)
else:
count += 1
return count
@staticmethod
def constraint_expert_local_exchange(current_expert_table,
global_deployment):
for layer_id in range(len(global_deployment)):
for card_id in range(len(global_deployment[layer_id])):
current_list = [
int(x) for x in current_expert_table[layer_id][card_id]
]
new_list = [
int(x) for x in global_deployment[layer_id][card_id]
]
num = len(new_list)
new_index = [-1] * num
new_result = [-1] * num
remaining_elements = []
for i in range(num):
flag = True
for j in range(num):
if new_list[i] == current_list[j] and new_index[
j] == -1:
new_index[j] = 0
new_result[j] = current_list[j]
flag = False
break
if flag:
remaining_elements.append(new_list[i])
index = 0
for k in range(num):
if new_result[k] == -1:
new_result[k] = remaining_elements[index]
index += 1
global_deployment[layer_id][card_id] = new_result
return global_deployment
def rebalance_experts(self,
current_expert_table,
expert_workload,
is_node_redundant=False,
increment=0.01):
info = DynamicTable()
info.workload_table = expert_workload.numpy()
info.placement_table = current_expert_table.numpy()
assert info.workload_table is not None
layer_num, num_npus, experts_per_npu = info.workload_table.shape
expert_ids, counts = np.unique(info.placement_table[0],
return_counts=True)
num_redundancy_expert = self.get_redundant_num(num_npus, counts)
num_original_expert = len(expert_ids)
layer_workloads = self.add_redundant(info.placement_table,
info.workload_table,
num_original_expert)
max_heat_per_layer_before = self.calculate_max_heat_per_layer(
info.workload_table, layer_num)
npu_heat_all_origin = sum(max_heat_per_layer_before)
num_node = self.safe_exact_divide(num_npus, 8)
layer_num = layer_workloads.shape[0]
expert_num = layer_workloads.shape[1]
expert_from_device = np.zeros((layer_num, num_original_expert))
if num_original_expert != expert_num:
raise ValueError(
f"The number of original experts ({num_original_expert}) must match expert_num ({expert_num})"
)
if num_npus <= 0:
raise ValueError("The number of NPUs must be greater than 0")
if num_npus < num_redundancy_expert:
raise ValueError(
f"The number of NPUs ({num_npus}) must be greater than or equal to the number of redundant experts ({num_redundancy_expert})"
)
global_deployment: list[list[list[int]]] = [[[]
for _ in range(num_npus)]
for _ in range(layer_num)]
layer_initial_imbalance = self.calculate_initial_imbalance(
info.placement_table, layer_workloads)
max_heat_per_layer_after = np.zeros([layer_num])
sum_num = 0
for layer in range(layer_num):
# print(f"Load imbalance ratio of layer {layer} under the new workload", layer_initial_imbalance[layer])
if layer_initial_imbalance[layer] < 1.01:
global_deployment[layer] = info.placement_table[layer]
continue
ave_workload = self.safe_divide(np.sum(layer_workloads[layer]),
num_npus)
rendun_pos: list[list[int]] = [[] for _ in range(num_npus)]
existing_experts = set()
for device_id, device in enumerate(info.placement_table[layer]):
for index, expert_id in enumerate(device):
if expert_id not in existing_experts:
existing_experts.add(expert_id)
expert_from_device[layer][expert_id] = device_id
else:
rendun_pos[device_id].append(index)
result, max_workload, com_between_devices = self.redundant_expert_deployment(
layer_workloads[layer], info.placement_table[layer],
expert_from_device[layer], num_node, is_node_redundant,
rendun_pos)
# print(layer, f"Imbalance Ratio after Redundancy Adjustment:", self.safe_divide(max_workload, ave_workload))
global_deployment[layer], new_max_workload = self.exchange_experts(
result, com_between_devices, num_node, num_npus,
is_node_redundant, ave_workload, increment,
num_redundancy_expert, info.placement_table[layer])
# print(layer, f"Imbalance Ratio after Swap Adjustment:", self.safe_divide(new_max_workload, ave_workload))
for device_id in range(num_npus):
com_between_devices[device_id] = {
key: value
for key, value in com_between_devices[device_id].items()
}
sum_num += self.count_elements(com_between_devices[device_id])
max_heat_per_layer_after[layer] = max(
result, key=lambda x: x['total_load'])['total_load']
layer_changed_ratio = []
for layer_idx in range(layer_num):
layer_changed_ratio.append(
self.safe_divide(max_heat_per_layer_after[layer_idx],
max_heat_per_layer_before[layer_idx]))
per_layer_priority = np.argsort(layer_changed_ratio)
npu_heat_all_after = sum(max_heat_per_layer_after)
change = 0
if npu_heat_all_after < 0.95 * npu_heat_all_origin:
change = 1
new_global_deployment = self.constraint_expert_local_exchange(
current_expert_table, global_deployment)
return change, per_layer_priority, np.array(
new_global_deployment).tolist()
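# Illustrative usage (toy shapes assumed): unlike DynamicEplb, both inputs must be torch tensors,
# since .numpy() is called on them, shaped (num_layers, num_npus, slots_per_npu) with num_npus a
# multiple of 8 (one node = 8 dies):
#   policy = DynamicEplbV2(DynamicConfig())
#   changed, layer_priority, new_placement = policy.rebalance_experts(placement, workload)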

@@ -0,0 +1,33 @@
# Copyright Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
# TODO: Remove this factory once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
from .policy_abstract import DynamicConfig, EplbPolicy
from .policy_dynamic_ep import DynamicEplb
from .policy_dynamic_ep_v2 import DynamicEplbV2
from .policy_flashlb import FlashLB
from .policy_random import RandomLoadBalance
class PolicyFactory:
@staticmethod
def generate_policy(policy_type: int, config: DynamicConfig) -> EplbPolicy:
policy = {
# Constraint for applying Dynamic EPLB policy V2:
# if redundant experts exist, at most one redundant expert can be placed on each NPU,
# and its physical expert index must be 0.
# The policy applies greedy device-to-device (d2d) expert weight updates.
0:
RandomLoadBalance, # RandomLoadBalance: shuffle last physical expert on NPU 1 and 3
1:
DynamicEplb, # Dynamic EPLB policy: overall expert replacement based on current moe load
2:
DynamicEplbV2, # Dynamic EPLB policy V2: expert replacement with constrained number of expert shuffle
3:
FlashLB, # FlashLB EPLB policy: expert replacement based on Joint Optimization, Multi-Shot Enhancement and Incremental Adjustment
}
policy_class = policy.get(policy_type, RandomLoadBalance)
policy_instance = policy_class(config)
if policy_type == 3:
policy_instance.warm_up()
return policy_instance
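# Illustrative usage: policy_type selects one of the entries above and unknown values fall back to
# RandomLoadBalance; type 3 (FlashLB) is additionally warmed up before being returned.
#   policy = PolicyFactory.generate_policy(2, DynamicConfig())   # -> DynamicEplbV2 instance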

@@ -0,0 +1,651 @@
# Copyright Huawei Technologies Co., Ltd. 2024-2025. All rights reserved.
# TODO: Remove this policy once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
import logging
from collections import deque
from typing import Dict
import numpy as np
import torch
from numba import njit # type: ignore
from .policy_abstract import DynamicConfig, EplbPolicy
numba_logger = logging.getLogger("numba")
numba_logger.setLevel(logging.WARNING)
@njit
def compute_piece_counts(X, P, stage_weights):
n_stage, N = X.shape
S = P - N
pieces = np.ones(N, dtype=np.int32)
unit = X / pieces # unit[i, j] = X[i, j] / pieces[j]
for _ in range(S):
deltas = np.zeros(N, dtype=np.float32)
for i in range(n_stage):
# Find top1 and top2
idx1 = -1
idx2 = -1
val1 = -1.0
val2 = -1.0
for j in range(N):
v = unit[i, j]
if v > val1:
val2 = val1
idx2 = idx1
val1 = v
idx1 = j
elif v > val2:
val2 = v
idx2 = j
origin = unit[i, idx1]
secv = unit[i, idx2]
alt = X[i, idx1] / (pieces[idx1] + 1)
delta = origin - (alt if alt > secv else secv)
deltas[idx1] += delta * stage_weights[i] if np.any(
delta) != 0 else stage_weights[i]
max_idx = np.argmax(deltas)
pieces[max_idx] += 1
for i in range(n_stage):
unit[i, max_idx] = X[i, max_idx] / pieces[max_idx]
# Compute max load
max_load = 0.0
for j in range(N):
total = 0.0
for i in range(n_stage):
total += unit[i, j]
if total > max_load:
max_load = total
return pieces
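# Illustrative reading of compute_piece_counts above: X has shape (n_stage, N) and P is the total
# number of physical expert slots; the P - N extra replicas are granted one at a time to the
# expert whose additional replica most reduces the stage-weighted peak per-replica load.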
@njit
def jsq_placement(X, pieces, M, stage_weights):
n_stage, N = X.shape
total_piece = pieces.sum()
num_per_group = total_piece // M
# 1. Compute unit_hotness
unit_hotness = np.empty((n_stage, N), dtype=np.float32)
for i in range(N):
if pieces[i] > 0:
for s in range(n_stage):
unit_hotness[s, i] = X[s, i] / pieces[i]
else:
for s in range(n_stage):
unit_hotness[s, i] = 0.0
# 2. Sort by total hotness
scores = np.zeros(N, dtype=np.float32)
for i in range(N):
for s in range(n_stage):
scores[i] += unit_hotness[s, i]
idx = np.argsort(-scores)
# 3. Initialization
loads = np.zeros((n_stage, M), dtype=np.float32)
dev_phy_exp_n = np.zeros(M, dtype=np.int32)
deployment = -np.ones((M, num_per_group), dtype=np.int32)
dep_ptr = np.zeros(M, dtype=np.int32)
# 4. Main loop
for t in range(N):
i = idx[t]
used_device = list()
for _ in range(pieces[i]):
# 4.1 Construct w vector
w = np.empty(n_stage, dtype=np.float32)
for s in range(n_stage):
w[s] = unit_hotness[s, i]
# 4.2 Compute stage-level maximum load
stage_max = np.empty(n_stage, dtype=np.float32)
for s in range(n_stage):
max_val = loads[s, 0]
for k in range(1, M):
if loads[s, k] > max_val:
max_val = loads[s, k]
stage_max[s] = max_val
# 4.3 Compute denominator
denom = np.empty(n_stage, dtype=np.float32)
for s in range(n_stage):
sum_tmp = 0.0
for j in range(M):
sum_tmp += loads[s, j] + w[s]
denom[s] = sum_tmp / M + 1e-2
# 4.4 Find best device j
best_j = -1
best_val = 1e30
for j in range(M):
if dev_phy_exp_n[j] >= num_per_group:
continue
if j in used_device:
continue
score = 0.0
for s in range(n_stage):
tmp_sj = loads[s, j] + w[s]
numer_sj = tmp_sj if tmp_sj > stage_max[s] else stage_max[s]
score += stage_weights[s] * (numer_sj / denom[s])
if score < best_val:
best_val = score
best_j = j
if best_j == -1:
continue
used_device.append(best_j)
# 4.5 Update status
for s in range(n_stage):
loads[s, best_j] += w[s]
ptr = dep_ptr[best_j]
deployment[best_j, ptr] = i
dep_ptr[best_j] += 1
dev_phy_exp_n[best_j] += 1
# Handle remaining -1 values: fill with random experts from range(N) not already placed on this rank
for rank in range(M):
for col in range(num_per_group):
if deployment[rank, col] == -1:
# Experts already placed on this rank
current_rank_elements = set(deployment[rank, :])
# Keep experts from range(N) not yet on this rank
available = [
x for x in range(N) if x not in current_rank_elements
]
# Randomly select an available element to fill
if len(available) > 0:
rand_idx = np.random.randint(0, len(available))
deployment[rank, col] = available[rand_idx]
elif N > 0:
# Every expert already appears on this rank, so any expert can be picked at random.
deployment[rank, col] = np.random.randint(0, N)
return deployment
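# Illustrative reading of jsq_placement above: replicas are placed join-shortest-queue style;
# experts are visited in descending total hotness and each replica goes to the device with the
# lowest stage-weighted load score that still has a free slot and does not already hold a replica
# of that expert.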
@njit
def slice_values(X, pieces):
total_len = 0
for i in range(X.shape[0]):
total_len += pieces[i]
result = np.empty(total_len, dtype=np.float32)
idx = 0
for i in range(X.shape[0]):
val = X[i] / pieces[i]
for _ in range(pieces[i]):
result[idx] = val
idx += 1
return result
@njit
def group_based_adaptive_bloating_kernel(X, P, M, simulated_pieces,
simulated_deployment, stage_weights):
n_stage, N = X.shape
num_group = P // M
X_all = np.zeros(N, dtype=np.float32)
for i in range(n_stage):
for j in range(N):
X_all[j] += X[i, j]
sort_idx = np.argsort(np.negative(X_all))
X_sorted = X[:, sort_idx]
unit_load = np.empty(N, dtype=np.float32)
for j in range(N):
unit_load[j] = X_all[j] / simulated_pieces[j]
flat_deployment = simulated_deployment.reshape(-1)
simulated_load = np.zeros(M, dtype=np.float32)
for i in range(flat_deployment.shape[0]):
simulated_load[i // (flat_deployment.shape[0] //
M)] += unit_load[flat_deployment[i]]
slice_vals = slice_values(X_all, simulated_pieces)
sorted_slices = np.sort(slice_vals)[::-1]
simulated_slopes = (sorted_slices[:-M + 1] - sorted_slices[M - 1:]) / M
cumulative_slices_used = np.zeros(N, dtype=np.int32)
acc = 0
for i in range(N):
acc += simulated_pieces[sort_idx[i]]
cumulative_slices_used[i] = acc
group_boundary_indices = np.zeros(num_group, dtype=np.int32)
for i in range(1, num_group + 1):
for j in range(N):
if cumulative_slices_used[j] >= i * M:
group_boundary_indices[i - 1] = j
break
slices_used_per_group = np.zeros(num_group, dtype=np.int32)
slices_used_per_group[0] = group_boundary_indices[0]
for i in range(1, num_group):
slices_used_per_group[
i] = group_boundary_indices[i] - group_boundary_indices[i - 1]
slices_used_per_group = M - slices_used_per_group
loads = np.zeros(M, dtype=np.float32)
pieces = np.zeros(N, dtype=np.int32)
num_remain_slice = P - N
current_idx = 0
for g in range(num_group):
window = X_sorted[:, current_idx:current_idx + 2 * M]
low = max(0, current_idx + M - N)
high = min(num_remain_slice, M - 1)
while (high - low) > 1:
mid = int((high + low) // 2)
keep = M - mid
current_group = window[:, :keep]
current_pieces = compute_piece_counts(current_group, M,
stage_weights)
current_pieces = np.maximum(current_pieces, 1)
current_slice = slice_values(current_group.sum(0), current_pieces)
current_slice_sorted = np.sort(current_slice)
current_loads = loads + current_slice_sorted
current_max: np.float32 = np.max(current_loads)
current_min: np.float32 = np.min(current_loads)
current_slope = (current_max - current_min) / M
next_slope: np.float32 = np.max(simulated_slopes[current_idx +
keep:])
if abs(current_slope) > abs(next_slope):
low = mid
else:
high = mid
S = high
keep = M - S
current_group = window[:, :keep]
current_pieces = compute_piece_counts(current_group, M, stage_weights)
for i in range(keep):
pieces[sort_idx[current_idx + i]] = current_pieces[i]
current_slice = slice_values(current_group.sum(0), current_pieces)
current_slice_sorted = np.sort(current_slice)
loads += current_slice_sorted
loads = np.sort(loads)[::-1]
current_idx += keep
num_remain_slice -= S
return pieces
@njit
def compute_objective(deployment, X, pieces):
M, P = deployment.shape
loads = np.zeros(M)
for i in range(M):
for j in range(P):
expert = deployment[i, j]
if pieces[expert] == 0:
continue
loads[i] += X[expert] / pieces[expert]
mean_load = np.mean(loads)
max_load: np.float32 = np.max(loads)
obj = max_load / mean_load
return obj, loads
@njit
def auto_fix_new_placement(old_placement, new_placement):
"""
Adjust the new_placement matrix to ensure elements (including duplicates) that exist in both
old_placement and new_placement remain in their original positions from old_placement.
New elements (unique to new_placement) will fill the remaining empty positions.
Args:
old_placement: Old deployment matrix with shape (num_ranks, num_experts)
new_placement: New deployment matrix to be fixed, must have the same shape as old_placement
Returns:
fixed_new: adjusted version of the new_placement matrix
"""
num_ranks, num_experts = old_placement.shape
fixed_new = np.empty_like(new_placement)
max_expert_old = old_placement.max() if num_experts > 0 else 0
max_expert_new = new_placement.max() if num_experts > 0 else 0
max_expert = max(max_expert_old, max_expert_new)
for rank_id in range(num_ranks):
old_row = old_placement[rank_id]
new_row = new_placement[rank_id]
index_array = np.full((max_expert + 1, num_experts),
-1,
dtype=np.int32)
count_array = np.zeros(max_expert + 1, dtype=np.int32)
for idx in range(num_experts):
val = old_row[idx]
if val >= 0 and val <= max_expert:
pos = count_array[val]
index_array[val, pos] = idx
count_array[val] += 1
old_counter = np.zeros(max_expert + 1, dtype=np.int32)
for idx in range(num_experts):
val = old_row[idx]
if val >= 0 and val <= max_expert:
old_counter[val] += 1
retain_elements = np.empty(num_experts, dtype=new_placement.dtype)
new_elements = np.empty(num_experts, dtype=new_placement.dtype)
retain_ptr = 0
new_ptr = 0
for val in new_row:
if val >= 0 and val <= max_expert and old_counter[val] > 0:
retain_elements[retain_ptr] = val
retain_ptr += 1
old_counter[val] -= 1
else:
new_elements[new_ptr] = val
new_ptr += 1
current_fixed = np.full(num_experts, -1, dtype=new_placement.dtype)
for i in range(retain_ptr):
val = retain_elements[i]
if val >= 0 and val <= max_expert:
pos = count_array[val] - 1
if pos >= 0:
idx = index_array[val, pos]
current_fixed[idx] = val
count_array[val] -= 1
empty_indices = np.empty(num_experts, dtype=np.int32)
empty_ptr = 0
for idx in range(num_experts):
if current_fixed[idx] == -1:
empty_indices[empty_ptr] = idx
empty_ptr += 1
for i in range(new_ptr):
if i < empty_ptr:
current_fixed[empty_indices[i]] = new_elements[i]
fixed_new[rank_id] = current_fixed
return fixed_new
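# Illustrative behaviour of auto_fix_new_placement above (toy row, values assumed): for an old row
# [0, 1, 2, 3] and a new row [2, 5, 0, 6], experts 0 and 2 appear in both and keep their old
# slots, while 5 and 6 fill the freed positions, giving [0, 5, 2, 6].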
class FlashLB(EplbPolicy):
def __init__(self, config: DynamicConfig):
super().__init__(config)
self.par_history: Dict[int, float] = {}
self.hotness_window: Dict[int, deque[float]] = {}
self.max_stage_window = (config.max_stage_window if hasattr(
config, "max_stage_window") else 1)
self.buffer_expert_layer_num = (
config.buffer_expert_layer_num if hasattr(
config, "buffer_expert_layer_num") else 58)
self.threshold_ratio = (config.threshold_ratio if hasattr(
config, "threshold_ratio") else 0)
def compute_expert_hotness(self, num_of_expert: int,
deployment: np.ndarray, rank_load: np.ndarray):
hotness = np.zeros(num_of_expert, dtype=rank_load.dtype)
deployment_flat = deployment.ravel()
rank_load_flat = rank_load.ravel()
np.add.at(hotness, deployment_flat, rank_load_flat)
return hotness
def compute_rank_load(self, deployment: np.ndarray, hotness: np.ndarray):
n_stage, N = hotness.shape
if np.any(deployment < 0):
print(f"Invalid deployment with negative values: {deployment}")
raise ValueError("Deployment table contains negative values.")
counts = np.bincount(deployment.reshape(-1), minlength=N)
unit_hotness = np.divide(hotness,
counts,
out=np.zeros_like(hotness, dtype=float),
where=counts != 0)
stage_par = np.zeros(n_stage)
for i in range(n_stage):
stage_load = unit_hotness[i][deployment].sum(-1)
stage_par[i] = stage_load.max() / stage_load.mean()
return stage_par.mean()
def group_based_adaptive_bloating(self,
X,
P,
M,
stage_weights=None,
recorsive=False):
n_stage, N = X.shape
if stage_weights is None:
stage_weights = np.ones(n_stage, dtype=np.float32)
if recorsive:
(
simulated_deployment,
simulated_pieces,
) = self.group_based_adaptive_bloating(X,
P,
M,
stage_weights,
recorsive=False)
else:
simulated_pieces = compute_piece_counts(X, P, stage_weights)
simulated_deployment = jsq_placement(X, simulated_pieces, M,
stage_weights)
pieces = group_based_adaptive_bloating_kernel(
X.astype(np.float32),
P,
M,
simulated_pieces.astype(np.int32),
simulated_deployment.astype(np.int32),
stage_weights.astype(np.float32),
)
deployment = jsq_placement(X, pieces, M, stage_weights)
X_all = X.sum(0)
unit_load = np.divide(X_all,
pieces,
out=np.zeros_like(X_all, dtype=float),
where=pieces != 0)
load = unit_load[deployment].sum(-1)
sim_unit_load = X_all / simulated_pieces
sim_load = sim_unit_load[simulated_deployment].sum(-1)
if load.max() > sim_load.max():
return simulated_deployment, simulated_pieces
return deployment, pieces
def need_update(self, current_par, layer_id=0):
threshold = self.par_history.get(layer_id, 0.0)
return current_par >= self.threshold_ratio * threshold
def compute_stage_weight(self, hotness):
n_stage = hotness.shape[0]
stage_weights = np.zeros(n_stage)
for i in range(n_stage):
stage_weights[i] = hotness[i].sum()
stage_weights = stage_weights / stage_weights.max()
return stage_weights
def rebalance_layer(self, deployment, hotness, layer_id=0):
num_rank, expert_per_rank = deployment.shape
num_expert = np.unique(deployment.reshape(-1)).shape[0]
num_of_redundant_expert = num_rank * expert_per_rank - num_expert
current_par = self.compute_rank_load(deployment, hotness)
if not self.need_update(current_par, layer_id):
return deployment, current_par, current_par
stage_weights = self.compute_stage_weight(hotness)
new_deployment, _ = self.group_based_adaptive_bloating(
hotness,
num_expert + num_of_redundant_expert,
num_rank,
stage_weights,
recorsive=False,
)
if np.any(new_deployment < 0):
print(f"{new_deployment=}")
new_par = self.compute_rank_load(new_deployment, hotness)
return new_deployment, new_par, current_par
def register_hotness(self, deployment, rank_load, num_layer, num_expert):
for layer in range(num_layer):
if layer not in self.hotness_window:
self.hotness_window[layer] = deque(
maxlen=self.max_stage_window)
hotness = self.compute_expert_hotness(num_expert,
deployment[layer],
rank_load[layer])
self.hotness_window[layer].append(hotness)
def compress_by_avg_pooling_fast_nd(self, arr, m):
n, d = arr.shape
idx = (np.arange(n) * m // n)
result = np.zeros((m, d))
counts = np.zeros((m, 1))
np.add.at(result, idx, arr)
np.add.at(counts, idx, 1)
return result / counts
def rebalance_experts(self, current_expert_table, expert_workload):
current_deployment = np.array(current_expert_table)
expert_workload = np.array(expert_workload)
expert_workload += 1
num_layer = expert_workload.shape[0]
num_expert = np.unique(current_expert_table[0].reshape(-1)).shape[0]
self.register_hotness(current_deployment, expert_workload, num_layer,
num_expert)
new_deployment = current_deployment.copy()
layers_need_update = np.arange(num_layer)
new_par = np.zeros(layers_need_update.shape[0])
current_par = np.zeros(layers_need_update.shape[0])
for i, layer in enumerate(layers_need_update):
hotness = np.array(self.hotness_window[layer])
if hotness.shape[0] > self.max_stage_window:
hotness = self.compress_by_avg_pooling_fast_nd(
hotness, self.max_stage_window)
(
new_deployment[layer],
new_par[i],
current_par[i],
) = self.rebalance_layer(current_deployment[layer],
hotness,
layer_id=layer)
priority = new_par / current_par
priority_idx = np.argsort(priority)
priority_idx = priority_idx[priority[priority_idx] <
1][:self.buffer_expert_layer_num]
if np.all(expert_workload == 1):
for _, layer in enumerate(layers_need_update):
self.hotness_window[layer].pop()
return False, np.array([], dtype=int), current_deployment
change = len(priority_idx) > 0
if change:
for idx in priority_idx:
self.par_history[layers_need_update[idx]] = new_par[idx]
layers_need_update = priority_idx
deployment = current_deployment
for layer in layers_need_update:
deployment[layer] = auto_fix_new_placement(
current_deployment[layer], new_deployment[layer])
return change, layers_need_update, deployment
def generate_layered_experts(num_layers=58,
layer_shape=(32, 9),
expert_min=0,
expert_max=255):
"""
Generate expert deployment matrix meeting the following conditions:
- Total of num_layers layers
- Each layer has shape layer_shape (32,9)
- Each expert from expert_min to expert_max (0 to 255) appears at least once in each layer
Args:
num_layers: Number of layers, default 58
layer_shape: Shape of a single layer, default (32,9)
expert_min: Minimum expert ID, default 0
expert_max: Maximum expert ID, default 255
Returns:
torch.Tensor: Tensor with shape (num_layers, layer_shape[0], layer_shape[1])
"""
# 1. Basic parameter calculation
expert_num = expert_max - expert_min + 1 # Total number of experts: 256 (0~255)
layer_total = layer_shape[0] * layer_shape[
1] # Total elements in a single layer: 32*9=288
extra_slots = layer_total - expert_num # Number of random positions to fill per layer: 288-256=32
# 2. Verify feasibility (total elements must be ≥ number of experts to cover all experts)
assert layer_total >= expert_num, (
f"Number of elements in a single layer {layer_total} < number of experts {expert_num}, "
"cannot cover all experts")
# 3. Generate layers one by one
layers = []
for _ in range(num_layers):
# 3.1 Generate "complete expert sequence" (ensure each expert from 0 to 255 is included)
full_experts = torch.arange(expert_min,
expert_max + 1,
dtype=torch.int64) # shape (256,)
# 3.2 Generate "supplementary random experts" (fill remaining 32 positions, randomly selected from 0~255)
extra_experts = torch.randint(expert_min,
expert_max + 1,
size=(extra_slots, ),
dtype=torch.int64) # shape (32,)
# 3.3 Concatenate and shuffle (ensure random distribution of experts in each layer)
layer_flat = torch.cat([full_experts, extra_experts],
dim=0) # shape (288,)
# Shuffle order (use randperm to generate random indices to avoid repeated shuffling issues)
shuffle_idx = torch.randperm(layer_flat.shape[0])
layer_shuffled = layer_flat[shuffle_idx]
# 3.4 Reshape to layer_shape (32,9)
layer = layer_shuffled.reshape(layer_shape)
layers.append(layer)
# 4. Stack all layers to get the final tensor
return torch.stack(layers, dim=0) # shape (58,32,9)
def warm_up():
exam_config = DynamicConfig()
exam_config.ep_worldsize = 32
exam_config.num_die_per_host = 16
algo = FlashLB(exam_config)
# Generate target tensor
expert_tensor = generate_layered_experts(num_layers=58,
layer_shape=(32, 9))
algo.rebalance_experts(expert_tensor, torch.randint(1, 1000, (58, 32, 9)))

@@ -0,0 +1,30 @@
# Copyright Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
# TODO: Remove this policy once https://github.com/vllm-project/vllm/pull/24069 is merged into vLLM.
import copy
import random
from .policy_abstract import DynamicConfig, EplbPolicy
random.seed(42)
class RandomLoadBalance(EplbPolicy):
def __init__(self, config: DynamicConfig):
super().__init__(config)
def rebalance_experts(self, current_expert_table, expert_workload):
new_table = copy.deepcopy(current_expert_table)
num_layers = len(current_expert_table)
for i in range(num_layers):
# choose two cards whose last expert will be swapped (random sampling left commented out; fixed to cards 3 and 1)
# indices = random.sample(range(num_card), 2)
indices = [3, 1]
# swap redundant experts
expert_id_to_exchange = new_table[i][indices[0]][-1].clone()
new_table[i][indices[0]][-1] = new_table[i][indices[1]][-1]
new_table[i][indices[1]][-1] = expert_id_to_exchange
return 1, [-i for i in range(num_layers)], new_table
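# Illustrative usage (toy shapes assumed): the table entries must support .clone(), e.g. a torch
# tensor of shape (num_layers, num_cards, experts_per_card) with at least 4 cards; each layer
# swaps the last expert of card 3 with that of card 1.
#   policy = RandomLoadBalance(DynamicConfig())
#   changed, layer_priority, new_table = policy.rebalance_experts(table, workload)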