Mirror of https://github.com/handsomezhuzhu/vllm-npu-plugin.git, synced 2026-02-20 11:42:30 +00:00.
- NPUPlatform: device management, HCCL process group, config adaptation
- AscendAttentionBackend: npu_fusion_attention (prefill) + npu_incre_flash_attention (decode)
- NPUCommunicator: HCCL-based distributed communication
- NPUWorker: NPU device init, memory profiling
- Custom ops: SiluAndMul, RMS norm, rotary embedding
- Plugin registered via the vllm.platform_plugins entry point (see the registration sketch after this list)

Based on the official vllm-ascend pattern, targeting Ascend 910B.
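The entry-point registration itself is not shown on this page. Below is a minimal setup.py sketch of how a platform plugin is typically exposed through the vllm.platform_plugins group; the package, module, and function names are assumptions for illustration, not taken from the repository.

# Hypothetical setup.py sketch (package/module/function names are assumptions).
# vLLM discovers platform plugins through the "vllm.platform_plugins" entry
# point group; the referenced function returns the fully qualified name of the
# Platform subclass, or None if the platform is unavailable on this host.
from setuptools import setup

setup(
    name="vllm-npu-plugin",
    packages=["vllm_npu"],
    entry_points={
        "vllm.platform_plugins": [
            "npu = vllm_npu:register",
        ],
    },
)

# vllm_npu/__init__.py (also assumed):
#
# def register():
#     return "vllm_npu.platform.NPUPlatform"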
18 lines · 471 B · Python
"""
|
|
NPU-optimized activation functions for Ascend.
|
|
|
|
Provides ``AscendSiluAndMul`` that uses ``torch_npu.npu_swiglu`` for
|
|
fused SiLU+Mul on NPU devices.
|
|
"""
|
|
|
|
import torch
|
|
from vllm.model_executor.layers.activation import SiluAndMul
|
|
|
|
|
|
class AscendSiluAndMul(SiluAndMul):
|
|
"""SiluAndMul using torch_npu.npu_swiglu on Ascend NPU."""
|
|
|
|
def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
|
|
import torch_npu # noqa: F401
|
|
return torch_npu.npu_swiglu(x)
|
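For context, a minimal usage sketch follows. It assumes torch_npu and an Ascend device are available and that the NPU platform plugin is active, in which case vLLM's CustomOp dispatch is expected to route calls to forward_oot.

import torch
import torch_npu  # assumed installed, with an Ascend NPU visible

layer = AscendSiluAndMul()
# The input packs the gate and up projections along the last dimension (2 * d);
# npu_swiglu computes SiLU(gate) * up in one fused kernel, halving that dimension.
x = torch.randn(8, 2 * 128, dtype=torch.float16, device="npu")
out = layer(x)  # CustomOp dispatch is expected to select forward_oot on NPU
assert out.shape == (8, 128)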