A Deep Dive into OP Stack: Withdrawal and Deposit Processing Logic

Roles in the OP Stack

  • op-node — the client that works with op-geth to pack transactions into blocks, derives the transaction state, and keeps data transfer in sync
  • batcher — submits batched L2 data to an EOA account on L1
  • op-proposer — submits block state (output roots) to the L2OutputOracle contract on L1
  • CrossDomainMessenger — the cross-chain messenger contract, responsible for L1->L2 and L2->L1 communication
  • OptimismPortal — the OP Stack's hub contract for deposits and withdrawals
  • Bridges — the bridge contracts, whose main job is to carry deposits and withdrawals
  • L2OutputOracle — the contract on L1 that receives state roots coming from L2

L2 -> L1 Withdrawal Logic

Core withdrawal steps

  1. Step 1: the user calls withdraw on L2 to send the tokens to their own address
  2. Step 2: the business logic is handled at the L2 contract layer, passing through the following contracts and steps:
    1. First, _initiateWithdrawal is executed on the L2StandardBridge, branching on whether the asset is ETH or an ERC20.
    2. If ETH is being withdrawn, the CrossDomainMessenger's sendMessage method is called; it increments msgNonce and, inside the method body, calls L2CrossDomainMessenger's _sendMessage method.
    3. L2CrossDomainMessenger._sendMessage calls L2ToL1MessagePasser's initiateWithdrawal, which constructs the withdrawalHash and increments its own msgNonce by 1, then emits an event.
  3. Step 3: the op-node in the sequencer picks up the transaction event and packs it into a transaction (this step happens off-chain)
  4. Step 4: op-batcher rolls the packed transactions up to L1, while op-proposer submits this batch's state root to L1
  5. Step 5: the user claims the funds on L1 (note: only after the challenge period has elapsed), for example via the op-stack SDK; internally it calls the OptimismPortal on L1 to withdraw the funds.

L2 on-chain source code

function _initiateWithdrawal(
    address _l2Token,
    address _from,
    address _to,
    uint256 _amount,
    uint32 _minGasLimit,
    bytes memory _extraData
)
    internal
{
    if (_l2Token == Predeploys.LEGACY_ERC20_ETH) {  // check whether the token is ETH
        _initiateBridgeETH(_from, _to, _amount, _minGasLimit, _extraData);
    } else {
        address l1Token = OptimismMintableERC20(_l2Token).l1Token();  // it's an ERC20
        _initiateBridgeERC20(_l2Token, l1Token, _from, _to, _amount, _minGasLimit, _extraData);
    }
}

This executes the parent contract's method, _initiateBridgeETH:

function _initiateBridgeETH(
    address _from,
    address _to,
    uint256 _amount,
    uint32 _minGasLimit,
    bytes memory _extraData
)
    internal
{
    require(isCustomGasToken() == false, "StandardBridge: cannot bridge ETH with custom gas token");
    require(msg.value == _amount, "StandardBridge: bridging ETH must include sufficient ETH value");

    _emitETHBridgeInitiated(_from, _to, _amount, _extraData);

    messenger.sendMessage{ value: _amount }({
        _target: address(otherBridge),
        _message: abi.encodeWithSelector(this.finalizeBridgeETH.selector, _from, _to, _amount, _extraData),
        _minGasLimit: _minGasLimit
    });
}

At this point, execution enters the CrossDomainMessenger:

function sendMessage(address _target, bytes calldata _message, uint32 _minGasLimit) external payable {
    if (isCustomGasToken()) {
        require(msg.value == 0, "CrossDomainMessenger: cannot send value with custom gas token");
    }

    _sendMessage({
        _to: address(otherMessenger),
        _gasLimit: baseGas(_message, _minGasLimit),
        _value: msg.value,
        _data: abi.encodeWithSelector(
            this.relayMessage.selector, messageNonce(), msg.sender, _target, msg.value, _minGasLimit, _message
        )
    });

    emit SentMessage(_target, msg.sender, _message, messageNonce(), _minGasLimit);
    emit SentMessageExtension1(msg.sender, msg.value);

    unchecked {
        ++msgNonce;
    }
}

When _sendMessage is called, it is the child contract's _sendMessage that actually runs:

function _sendMessage(address _to, uint64 _gasLimit, uint256 _value, bytes memory _data) internal override {
    IL2ToL1MessagePasser(payable(Predeploys.L2_TO_L1_MESSAGE_PASSER)).initiateWithdrawal{ value: _value }(
        _to, _gasLimit, _data
    );
}

Execution finally lands in L2ToL1MessagePasser, which packs the transaction parameters into a hash and emits an event. At this point, the L2-side logic is complete.

function initiateWithdrawal(address _target, uint256 _gasLimit, bytes memory _data) public payable {
    bytes32 withdrawalHash = Hashing.hashWithdrawal(
        Types.WithdrawalTransaction({
            nonce: messageNonce(),
            sender: msg.sender,
            target: _target,
            value: msg.value,
            gasLimit: _gasLimit,
            data: _data
        })
    );

    sentMessages[withdrawalHash] = true;

    emit MessagePassed(messageNonce(), msg.sender, _target, msg.value, _gasLimit, _data, withdrawalHash);

    unchecked {
        ++msgNonce;
    }
}
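
As a sanity check, the withdrawalHash computed above can be reproduced off-chain. A minimal Go sketch, assuming Hashing.hashWithdrawal is keccak256 over the abi.encode of the six WithdrawalTransaction fields in struct order:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

// hashWithdrawal mirrors the assumed layout of Hashing.hashWithdrawal:
// keccak256(abi.encode(nonce, sender, target, value, gasLimit, data)).
func hashWithdrawal(nonce *big.Int, sender, target common.Address, value, gasLimit *big.Int, data []byte) common.Hash {
    uint256Ty, _ := abi.NewType("uint256", "", nil)
    addressTy, _ := abi.NewType("address", "", nil)
    bytesTy, _ := abi.NewType("bytes", "", nil)
    args := abi.Arguments{
        {Type: uint256Ty}, {Type: addressTy}, {Type: addressTy},
        {Type: uint256Ty}, {Type: uint256Ty}, {Type: bytesTy},
    }
    packed, err := args.Pack(nonce, sender, target, value, gasLimit, data)
    if err != nil {
        panic(err)
    }
    return crypto.Keccak256Hash(packed)
}

func main() {
    h := hashWithdrawal(big.NewInt(0), common.Address{}, common.Address{}, big.NewInt(1), big.NewInt(100000), []byte{})
    fmt.Println(h.Hex())
}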

Off-chain execution logic

func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error {
    // Compute and validate the start and end block numbers of the latest L2 range to submit
    start, end, err := l.calculateL2BlockRangeToStore(ctx)
    if err != nil {
       l.Log.Warn("Error calculating L2 block range", "err", err)
       return err
    } else if start.Number >= end.Number {
       return errors.New("start number is >= end number")
    }

    var latestBlock *types.Block
    // Fetch block info starting from the first block, and add the blocks to the channelManager's blocks
    for i := start.Number + 1; i < end.Number+1; i++ {
       // the core logic is l.loadBlockIntoState
       block, err := l.loadBlockIntoState(ctx, i)
       if errors.Is(err, ErrReorg) {
          l.Log.Warn("Found L2 reorg", "block_number", i)
          l.lastStoredBlock = eth.BlockID{}
          return err
       } else if err != nil {
          l.Log.Warn("Failed to load block into state", "err", err)
          return err
       }
       l.lastStoredBlock = eth.ToBlockID(block)
       latestBlock = block
    }
    // Extract the basic L2BlockRef info
    l2ref, err := derive.L2BlockToBlockRef(l.RollupConfig, latestBlock)
    if err != nil {
       l.Log.Warn("Invalid L2 block loaded into state", "err", err)
       return err
    }
    // Record the loaded L2BlockRef in metrics
    l.Metr.RecordL2BlocksLoaded(l2ref)
    return nil
}
func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error {
    // Get the current L1 tip (latest block)
    l1tip, err := l.l1Tip(ctx)
    if err != nil {
       l.Log.Error("Failed to query L1 tip", "err", err)
       return err
    }
    // Record the current l1tip
    l.recordL1Tip(l1tip)

    // Pull the tx data associated with the current L1 tip from state. This step is key; its logic is shown next
    txdata, err := l.state.TxData(l1tip.ID())

    if err == io.EOF {
       l.Log.Trace("No transaction data available")
       return err
    } else if err != nil {
       l.Log.Error("Unable to get tx data", "err", err)
       return err
    }

    // Send the transaction data to L1
    if err = l.sendTransaction(txdata, queue, receiptsCh, daGroup); err != nil {
       return fmt.Errorf("BatchSubmitter.sendTransaction failed: %w", err)
    }
    return nil
}

Fetching the transaction data associated with the current L1 tip:

func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    // the two lines above take the mutex
    var firstWithTxData *channel
    // Find the first channel that contains tx data
    for _, ch := range s.channelQueue {
       if ch.HasTxData() {
          firstWithTxData = ch
          break
       }
    }

    dataPending := firstWithTxData != nil && firstWithTxData.HasTxData()
    s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", len(s.blocks))

    // If there is pending data, or the short-circuit condition (manager closed) holds, return that channel's tx data via nextTxData(firstWithTxData)
    if dataPending || s.closed {
       return s.nextTxData(firstWithTxData)
    }

    // No pending tx data; if there are no queued blocks to build more from, return EOF
    if len(s.blocks) == 0 {
       return txData{}, io.EOF
    }

    // Make sure there is a channel with enough space for the new blocks
    if err := s.ensureChannelWithSpace(l1Head); err != nil {
       return txData{}, err
    }

    // Process the pending blocks
    if err := s.processBlocks(); err != nil {
       return txData{}, err
    }

    // After all pending blocks are processed, register the current L1 head
    s.registerL1Block(l1Head)

    // Output the processed data as frames
    if err := s.outputFrames(); err != nil {
       return txData{}, err
    }
    // Return the current channel's tx data
    return s.nextTxData(s.currentChannel)
}

op-proposer logic: submitting the state root

func (l *L2OutputSubmitter) FetchL2OOOutput(ctx context.Context) (*eth.OutputResponse, bool, error) {
    if l.l2ooContract == nil {
       return nil, false, fmt.Errorf("L2OutputOracle contract not set, cannot fetch next output info")
    }

    cCtx, cancel := context.WithTimeout(ctx, l.Cfg.NetworkTimeout)
    defer cancel()
    callOpts := &bind.CallOpts{
       From:    l.Txmgr.From(),
       Context: cCtx,
    }
    // Get the block number of the next checkpoint
    nextCheckpointBlockBig, err := l.l2ooContract.NextBlockNumber(callOpts)
    if err != nil {
       return nil, false, fmt.Errorf("querying next block number: %w", err)
    }
    nextCheckpointBlock := nextCheckpointBlockBig.Uint64()
    // Get the current block number
    currentBlockNumber, err := l.FetchCurrentBlockNumber(ctx)
    if err != nil {
       return nil, false, err
    }

    // Compare the current block number against the next checkpoint's, to make sure we never propose a block from the future
    if currentBlockNumber < nextCheckpointBlock {
       l.Log.Debug("Proposer submission interval has not elapsed", "currentBlockNumber", currentBlockNumber, "nextBlockNumber", nextCheckpointBlock)
       return nil, false, nil
    }
    // Use the next checkpoint's block number to fetch the output info
    output, err := l.FetchOutput(ctx, nextCheckpointBlock)
    if err != nil {
       return nil, false, fmt.Errorf("fetching output: %w", err)
    }

    // Check whether the output's block ref is beyond the finalized L2 block number, and whether non-finalized outputs are allowed
    if output.BlockRef.Number > output.Status.FinalizedL2.Number && (!l.Cfg.AllowNonFinalized || output.BlockRef.Number > output.Status.SafeL2.Number) {
       l.Log.Debug("Not proposing yet, L2 block is not ready for proposal",
          "l2_proposal", output.BlockRef,
          "l2_safe", output.Status.SafeL2,
          "l2_finalized", output.Status.FinalizedL2,
          "allow_non_finalized", l.Cfg.AllowNonFinalized)
       return output, false, nil
    }
    return output, true, nil
}
func (l *L2OutputSubmitter) proposeOutput(ctx context.Context, output *eth.OutputResponse) {
    cCtx, cancel := context.WithTimeout(ctx, 10*time.Minute)
    defer cancel()
    //  If the check above returned true, submit the state-root transaction directly
    if err := l.sendTransaction(cCtx, output); err != nil {
       l.Log.Error("Failed to send proposal transaction",
          "err", err,
          "l1blocknum", output.Status.CurrentL1.Number,
          "l1blockhash", output.Status.CurrentL1.Hash,
          "l1head", output.Status.HeadL1.Number)
       return
    }
    l.Metr.RecordL2BlocksProposed(output.BlockRef)
}

With that, the off-chain part is complete as well.

Processing on L1

In the L2OutputOracle.proposeL2Output method:

function proposeL2Output(
    bytes32 _outputRoot,
    uint256 _l2BlockNumber,
    bytes32 _l1BlockHash,
    uint256 _l1BlockNumber
)
    external
    payable
{
    // Validate the sender
    require(msg.sender == proposer, "L2OutputOracle: only the proposer address can propose new outputs");
    // Validate the next block number
    require(
        _l2BlockNumber == nextBlockNumber(),
        "L2OutputOracle: block number must be equal to next expected block number"
    );
    // Validate the block timestamp
    require(
        computeL2Timestamp(_l2BlockNumber) < block.timestamp,
        "L2OutputOracle: cannot propose L2 output in the future"
    );

    require(_outputRoot != bytes32(0), "L2OutputOracle: L2 output proposal cannot be the zero hash");

    if (_l1BlockHash != bytes32(0)) {
        require(
            blockhash(_l1BlockNumber) == _l1BlockHash,
            "L2OutputOracle: block hash does not match the hash at the expected height"
        );
    }

    emit OutputProposed(_outputRoot, nextOutputIndex(), _l2BlockNumber, block.timestamp);
    // Push the corresponding state root into l2Outputs
    l2Outputs.push(
        Types.OutputProposal({
            outputRoot: _outputRoot,
            timestamp: uint128(block.timestamp),
            l2BlockNumber: uint128(_l2BlockNumber)
        })
    );
}

The sequence diagram (not reproduced here) ties these steps together.

L1 -> L2 Deposit Logic

Core deposit steps

Step 1: the user initiates a deposit on L1.
Step 2: the deposit goes through several core steps on the L1 chain:
1. It first enters the L1StandardBridge and executes _initiateETHDeposit.
2. That calls the CrossDomainMessenger contract's sendMessage.
3. Inside CrossDomainMessenger.sendMessage, L1CrossDomainMessenger's _sendMessage method is invoked, and msgNonce is maintained.
4. L1CrossDomainMessenger._sendMessage ends up emitting the TransactionDeposited event (via OptimismPortal); at this point, the L1-side execution is complete.
Step 3: off-chain, op-node picks up TransactionDeposited, builds the transaction parameters, and has op-geth call L2StandardBridge's finalizeDeposit (a sketch of this listening step follows the list).
Step 4: once finalizeDeposit completes, the whole deposit path is done.
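
To make step 3 concrete, here is a minimal Go sketch of watching the OptimismPortal for TransactionDeposited events — roughly what op-node's derivation pipeline does. The RPC endpoint and portal address are placeholders, and the topic layout assumes the event's three indexed parameters (from, to, version):

package main

import (
    "context"
    "log"

    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    // Placeholder websocket endpoint and OptimismPortal address.
    client, err := ethclient.Dial("wss://l1.example.com")
    if err != nil {
        log.Fatal(err)
    }
    portal := common.HexToAddress("0x0000000000000000000000000000000000000000")

    // topic0 of TransactionDeposited(address,address,uint256,bytes)
    topic := crypto.Keccak256Hash([]byte("TransactionDeposited(address,address,uint256,bytes)"))

    logs := make(chan types.Log)
    sub, err := client.SubscribeFilterLogs(context.Background(), ethereum.FilterQuery{
        Addresses: []common.Address{portal},
        Topics:    [][]common.Hash{{topic}},
    }, logs)
    if err != nil {
        log.Fatal(err)
    }
    for {
        select {
        case err := <-sub.Err():
            log.Fatal(err)
        case lg := <-logs:
            // Topics[1..3] carry from, to and version; Data carries opaqueData.
            from := common.BytesToAddress(lg.Topics[1].Bytes())
            log.Printf("deposit observed: from=%s l1Tx=%s", from.Hex(), lg.TxHash.Hex())
        }
    }
}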

The deposit source code on L1

function depositETH(uint32 _minGasLimit, bytes calldata _extraData) external payable onlyEOA {
    _initiateETHDeposit(msg.sender, msg.sender, _minGasLimit, _extraData);
}

This calls into the parent StandardBridge contract:

function _initiateBridgeETH(
    address _from,
    address _to,
    uint256 _amount,
    uint32 _minGasLimit,
    bytes memory _extraData
)
    internal
{
    require(isCustomGasToken() == false, "StandardBridge: cannot bridge ETH with custom gas token");
    require(msg.value == _amount, "StandardBridge: bridging ETH must include sufficient ETH value");

    // Emit the correct events. By default this will be _amount, but child
    // contracts may override this function in order to emit legacy events as well.
    _emitETHBridgeInitiated(_from, _to, _amount, _extraData);
    // Send the message; this enters the CrossDomainMessenger contract
    messenger.sendMessage{ value: _amount }({
        _target: address(otherBridge),
        _message: abi.encodeWithSelector(this.finalizeBridgeETH.selector, _from, _to, _amount, _extraData),
        _minGasLimit: _minGasLimit
    });
}

The logic matches the withdrawal path: sendMessage calls _sendMessage, which executes the child L1CrossDomainMessenger's override:

function sendMessage(address _target, bytes calldata _message, uint32 _minGasLimit) external payable {
    if (isCustomGasToken()) {
        require(msg.value == 0, "CrossDomainMessenger: cannot send value with custom gas token");
    }

    // Triggers a message to the other messenger. Note that the amount of gas provided to the
    // message is the amount of gas requested by the user PLUS the base gas value. We want to
    // guarantee the property that the call to the target contract will always have at least
    // the minimum gas limit specified by the user.
    _sendMessage({
        _to: address(otherMessenger),
        _gasLimit: baseGas(_message, _minGasLimit),
        _value: msg.value,
        _data: abi.encodeWithSelector(
            this.relayMessage.selector, messageNonce(), msg.sender, _target, msg.value, _minGasLimit, _message
        )
    });

    emit SentMessage(_target, msg.sender, _message, messageNonce(), _minGasLimit);
    emit SentMessageExtension1(msg.sender, msg.value);

    unchecked {
        ++msgNonce;
    }
}
function _sendMessage(address _to, uint64 _gasLimit, uint256 _value, bytes memory _data) internal override {
    portal.depositTransaction{ value: _value }({
        _to: _to,
        _value: _value,
        _gasLimit: _gasLimit,
        _isCreation: false,
        _data: _data
    });
}

Execution then enters the OptimismPortal._depositTransaction method:

function _depositTransaction(
    address _to,
    uint256 _mint,
    uint256 _value,
    uint64 _gasLimit,
    bool _isCreation,
    bytes memory _data
)
    internal
{
    if (_isCreation && _to != address(0)) revert BadTarget();

    if (_gasLimit < minimumGasLimit(uint64(_data.length))) revert SmallGasLimit();

    if (_data.length > 120_000) revert LargeCalldata();

    // Transform the from-address to its alias if the caller is a contract.
    address from = msg.sender;
    if (msg.sender != tx.origin) {
        from = AddressAliasHelper.applyL1ToL2Alias(msg.sender);
    }

    // Pack the transaction parameters
    bytes memory opaqueData = abi.encodePacked(_mint, _value, _gasLimit, _isCreation, _data);

    // Emit the deposit event
    emit TransactionDeposited(from, _to, DEPOSIT_VERSION, opaqueData);
}
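
Because opaqueData is built with abi.encodePacked, off-chain consumers must slice it at fixed byte offsets. A minimal Go sketch, with the layout read off the encodePacked call above (32 + 32 + 8 + 1 fixed bytes, then the raw calldata):

package main

import (
    "encoding/binary"
    "fmt"
    "math/big"
)

// decodeOpaqueData unpacks abi.encodePacked(_mint, _value, _gasLimit, _isCreation, _data).
func decodeOpaqueData(opaque []byte) (mint, value *big.Int, gasLimit uint64, isCreation bool, data []byte, err error) {
    if len(opaque) < 73 {
        return nil, nil, 0, false, nil, fmt.Errorf("opaqueData too short: %d bytes", len(opaque))
    }
    mint = new(big.Int).SetBytes(opaque[0:32])        // _mint: uint256
    value = new(big.Int).SetBytes(opaque[32:64])      // _value: uint256
    gasLimit = binary.BigEndian.Uint64(opaque[64:72]) // _gasLimit: uint64
    isCreation = opaque[72] != 0                      // _isCreation: bool
    data = opaque[73:]                                // _data: raw bytes
    return mint, value, gasLimit, isCreation, data, nil
}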

With that, the deposit logic on L1 is complete.

Off-chain processing

PreparePayloadAttributes is responsible for pulling together information from L1, handling deposit transactions, and keeping all data consistent in time and logic. It ensures the generated L2 blocks correctly reflect the state of L1:

func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Context, l2Parent eth.L2BlockRef, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error) {
    var l1Info eth.BlockInfo
    var depositTxs []hexutil.Bytes
    var seqNumber uint64

    sysConfig, err := ba.l2.SystemConfigByL2Hash(ctx, l2Parent.Hash)
    if err != nil {
       return nil, NewTemporaryError(fmt.Errorf("failed to retrieve L2 parent block: %w", err))
    }

    // If the L1 origin changed in this block, then we are in the first block of the epoch. In this
    // case we need to fetch all transaction receipts from the L1 origin block so we can scan for
    // user deposits.
    if l2Parent.L1Origin.Number != epoch.Number {
       info, receipts, err := ba.l1.FetchReceipts(ctx, epoch.Hash)
       if err != nil {
          return nil, NewTemporaryError(fmt.Errorf("failed to fetch L1 block info and receipts: %w", err))
       }
       if l2Parent.L1Origin.Hash != info.ParentHash() {
          return nil, NewResetError(
             fmt.Errorf("cannot create new block with L1 origin %s (parent %s) on top of L1 origin %s",
                epoch, info.ParentHash(), l2Parent.L1Origin))
       }

       deposits, err := DeriveDeposits(receipts, ba.rollupCfg.DepositContractAddress)
       if err != nil {
          // deposits may never be ignored. Failing to process them is a critical error.
          return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err))
       }
       // apply sysCfg changes
       if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.rollupCfg, info.Time()); err != nil {
          return nil, NewCriticalError(fmt.Errorf("failed to apply derived L1 sysCfg updates: %w", err))
       }

       l1Info = info
       depositTxs = deposits
       seqNumber = 0
    } else {
       if l2Parent.L1Origin.Hash != epoch.Hash {
          return nil, NewResetError(fmt.Errorf("cannot create new block with L1 origin %s in conflict with L1 origin %s", epoch, l2Parent.L1Origin))
       }
       info, err := ba.l1.InfoByHash(ctx, epoch.Hash)
       if err != nil {
          return nil, NewTemporaryError(fmt.Errorf("failed to fetch L1 block info: %w", err))
       }
       l1Info = info
       depositTxs = nil
       seqNumber = l2Parent.SequenceNumber + 1
    }

    // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
    nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime
    if nextL2Time < l1Info.Time() {
       return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
          l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
    }

    var upgradeTxs []hexutil.Bytes
    if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) {
       upgradeTxs, err = EcotoneNetworkUpgradeTransactions()
       if err != nil {
          return nil, NewCriticalError(fmt.Errorf("failed to build ecotone network upgrade txs: %w", err))
       }
    }

    if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) {
       fjord, err := FjordNetworkUpgradeTransactions()
       if err != nil {
          return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err))
       }
       upgradeTxs = append(upgradeTxs, fjord...)
    }

    l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time)
    if err != nil {
       return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
    }

    var afterForceIncludeTxs []hexutil.Bytes
    if ba.rollupCfg.IsInterop(nextL2Time) {
       depositsCompleteTx, err := DepositsCompleteBytes(seqNumber, l1Info)
       if err != nil {
          return nil, NewCriticalError(fmt.Errorf("failed to create depositsCompleteTx: %w", err))
       }
       afterForceIncludeTxs = append(afterForceIncludeTxs, depositsCompleteTx)
    }

    txs := make([]hexutil.Bytes, 0, 1+len(depositTxs)+len(afterForceIncludeTxs)+len(upgradeTxs))
    txs = append(txs, l1InfoTx)
    txs = append(txs, depositTxs...)
    txs = append(txs, afterForceIncludeTxs...)
    txs = append(txs, upgradeTxs...)

    var withdrawals *types.Withdrawals
    if ba.rollupCfg.IsCanyon(nextL2Time) {
       withdrawals = &types.Withdrawals{}
    }

    var parentBeaconRoot *common.Hash
    if ba.rollupCfg.IsEcotone(nextL2Time) {
       parentBeaconRoot = l1Info.ParentBeaconRoot()
       if parentBeaconRoot == nil { // default to zero hash if there is no beacon-block-root available
          parentBeaconRoot = new(common.Hash)
       }
    }

    return &eth.PayloadAttributes{
       Timestamp:             hexutil.Uint64(nextL2Time),
       PrevRandao:            eth.Bytes32(l1Info.MixDigest()),
       SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr,
       Transactions:          txs,
       NoTxPool:              true,
       GasLimit:              (*eth.Uint64Quantity)(&sysConfig.GasLimit),
       Withdrawals:           withdrawals,
       ParentBeaconBlockRoot: parentBeaconRoot,
    }, nil
}

The final processing on the L2 side: relaying the message

function relayMessage(
    uint256 _nonce,
    address _sender,
    address _target,
    uint256 _value,
    uint256 _minGasLimit,
    bytes calldata _message
)
    external
    payable
{
    // Make sure the contract is not paused
    require(paused() == false, "CrossDomainMessenger: paused");

    // Make sure the version is correct
    (, uint16 version) = Encoding.decodeVersionedNonce(_nonce);
    require(version < 2, "CrossDomainMessenger: only version 0 or 1 messages are supported at this time");

    // Check whether this message has already been relayed, to prevent replays
    if (version == 0) {
        bytes32 oldHash = Hashing.hashCrossDomainMessageV0(_target, _sender, _message, _nonce);
        require(successfulMessages[oldHash] == false, "CrossDomainMessenger: legacy withdrawal already relayed");
    }

    // Use the version 1 hash as the message's unique identifier.
    bytes32 versionedHash =
        Hashing.hashCrossDomainMessageV1(_nonce, _sender, _target, _value, _minGasLimit, _message);

    if (_isOtherMessenger()) {
        // Make sure msg.value matches _value
        assert(msg.value == _value);
        assert(!failedMessages[versionedHash]);
    } else {
        require(msg.value == 0, "CrossDomainMessenger: value must be zero unless message is from a system address");

        require(failedMessages[versionedHash], "CrossDomainMessenger: message cannot be replayed");
    }
    // Make sure the target address is safe
    require(
        _isUnsafeTarget(_target) == false, "CrossDomainMessenger: cannot send message to blocked system address"
    );

    require(successfulMessages[versionedHash] == false, "CrossDomainMessenger: message has already been relayed");

    // Make sure there is enough gas to run the external call and finish execution; if not, mark the message as failed
    if (
        !SafeCall.hasMinGas(_minGasLimit, RELAY_RESERVED_GAS + RELAY_GAS_CHECK_BUFFER)
            || xDomainMsgSender != Constants.DEFAULT_L2_SENDER
    ) {
        failedMessages[versionedHash] = true;
        emit FailedRelayedMessage(versionedHash);

        // Revert in this case if the transaction was triggered by the estimation address. This
        // should only be possible during gas estimation or we have bigger problems. Reverting
        // here will make the behavior of gas estimation change such that the gas limit
        // computed will be the amount required to relay the message, even if that amount is
        // greater than the minimum gas limit specified by the user.
        if (tx.origin == Constants.ESTIMATION_ADDRESS) {
            revert("CrossDomainMessenger: failed to relay message");
        }

        return;
    }
    // The core step: forward the call via SafeCall.call
    xDomainMsgSender = _sender;
    bool success = SafeCall.call(_target, gasleft() - RELAY_RESERVED_GAS, _value, _message);
    xDomainMsgSender = Constants.DEFAULT_L2_SENDER;

    // Handle the final bookkeeping based on the call result
    if (success) {
        assert(successfulMessages[versionedHash] == false);
        successfulMessages[versionedHash] = true;
        emit RelayedMessage(versionedHash);
    } else {
        failedMessages[versionedHash] = true;
        emit FailedRelayedMessage(versionedHash);

        if (tx.origin == Constants.ESTIMATION_ADDRESS) {
            revert("CrossDomainMessenger: failed to relay message");
        }
    }
}
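
A side note on the versioned nonce decoded at the top of relayMessage: it packs a 16-bit message version into the high bits of the 256-bit nonce. A small Go sketch of the assumed layout (version in the high 16 bits, raw nonce in the low 240, matching Encoding.decodeVersionedNonce):

package main

import (
    "fmt"
    "math/big"
)

// decodeVersionedNonce splits a messenger nonce into its raw nonce and version.
func decodeVersionedNonce(versioned *big.Int) (nonce *big.Int, version uint16) {
    version = uint16(new(big.Int).Rsh(versioned, 240).Uint64())
    mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 240), big.NewInt(1))
    nonce = new(big.Int).And(versioned, mask)
    return nonce, version
}

func main() {
    // Example: version 1, raw nonce 42.
    v := new(big.Int).Lsh(big.NewInt(1), 240)
    v.Add(v, big.NewInt(42))
    fmt.Println(decodeVersionedNonce(v)) // 42 1
}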

Sequence diagram (figure not reproduced here)

References

https://docs.optimism.io/stack/protocol/rollup/withdrawal-flow
https://learnblockchain.cn/article/9207

Reprinted from: https://learnblockchain.cn/article/9419

First Impressions of the Cloud-Native Gateways Traefik and Nginx Proxy Manager

Overview

Traefik is a reverse proxy and load balancer aimed mainly at cloud-native scenarios. It automatically discovers services deployed on Kubernetes, Docker, Docker Swarm, and so on, and generates reverse-proxy routing rules from labels. The Traefik GitHub repo has 42.6k stars; official docs: doc.traefik.io/traefik/
Nginx Proxy Manager provides a management console on top of Nginx, letting users configure Nginx visually with ease; it has built-in automatic issuance and management of SSL certificates. The NPM GitHub repo has 12.9k stars; official docs: nginxproxymanager.com/

Environment for this first look

This first pass is about trying out the basic features of Traefik and NPM, so docker compose is used to spin up the environment quickly.

Custom network

Since Docker 1.10, the docker daemon has shipped an embedded DNS server that lets containers reach each other by container name — but only on a user-defined network. So first, create the custom network custom_net:

docker network create custom_net # bridge network by default

Of course, it can also be defined directly in the docker compose YAML so compose creates it itself:

# Create the custom network
networks:
  custom_net:
    driver: bridge
    name: custom_net

# Or reference an already-created network
#networks:
#  custom_net:
#    external: true

Using Traefik

Traefik configuration splits into static and dynamic configuration:

  • Static configuration is usually supplied as command-line flags; once set, it rarely needs to change
  • Dynamic configuration defines routing rules, middlewares (which process/transform requests), load balancing, etc., and can come from Docker, File, Ingress, Consul, Etcd, ZooKeeper, Redis, HTTP, and more

The docker compose configuration is as follows:

version: "3"
services:
  traefik:
    image: traefik:2.9
    command: 
      - "--log.level=DEBUG"
      - "--api.insecure=true"
      - "--api.dashboard=true" 
      - "--providers.docker=true"
      - "--providers.file.directory=/traefik/conf"
      - "--providers.file.watch=true"
      - "--providers.docker.exposedByDefault=false"
    ports:
      # The HTTP port
      - "80:80"
      # The Web UI (enabled by --api)
      - "81:8080"
    environment:
      TZ: Asia/Shanghai
    volumes:
      # So that Traefik can listen to the Docker events
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
      - "/root/docker/traefik/conf:/traefik/conf"
    networks:
      - custom_net

# Reference the pre-created network
networks:
  custom_net:
    external: true

Configuration notes

  • api.dashboard=true enables the dashboard, handy for checking which configuration is in effect
  • providers.docker=true enables automatic discovery of Docker container labels
  • providers.docker.exposedByDefault=false disables discovery for containers without labels
  • providers.file.watch=true enables watching for File-based dynamic configuration changes
  • providers.file.directory sets the directory holding the dynamic configuration files

This setup makes it easy to demonstrate, below, how Traefik builds forwarding rules automatically in both Docker and File modes.

The dashboard

With the YAML in place, start everything with docker compose up -d and open http://localhost:81/dashboard to see the dashboard:

  1. Providers shows Docker and File, meaning both kinds of dynamic configuration are enabled
  2. Entrypoints 80 and 8080 are Traefik's traffic entry points
  3. HTTP, TCP, and UDP routing rules are visualized
  4. Features lists Traefik's built-in capabilities: tracing, metrics, access logs, and the Hub extension

Automatic discovery via Docker labels

Testing with the official whoami image:

version: '3'
services:
  whoami:
    # A container that exposes an API to show its IP address
    image: traefik/whoami
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.whoami.rule=PathPrefix(`/test`)"
    networks:
      - custom_net
# Reference the pre-created network
networks:
  custom_net:
    external: true

Just set the labels; once the container starts, Traefik detects it automatically and adds the forwarding rule /test -> the whoami container.
PathPrefix matches requests whose path starts with the given prefix; see the official Traefik Routers docs for more matching rules.
The dashboard shows how traffic flows through the route:
in through Traefik's port 80 entrypoint, through the HTTP router matching the /test prefix, and on to port 80 exposed by the whoami container's Service.


Because Traefik and whoami share the same network (custom_net), whoami doesn't need to publish a port to the host; Traefik forwards straight to the container's internal port.

Note that the YAML above publishes no port for whoami.

Traefik also detects multiple instances automatically and load-balances across them.
Scale out to 3 whoami containers with docker compose up -d --scale whoami=3, then request http://127.0.0.1/test several times; the responses show the whoami containers being hit in round-robin order — the small checker below makes this easy to observe.
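
A small Go checker to watch the round-robin from the command line — assuming Traefik listens on 127.0.0.1:80 and whoami has been scaled to 3 replicas:

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
    "regexp"
)

func main() {
    // whoami prints a "Hostname: <container id>" line; extract it per request.
    re := regexp.MustCompile(`(?m)^Hostname: (.+)$`)
    for i := 1; i <= 6; i++ {
        resp, err := http.Get("http://127.0.0.1/test")
        if err != nil {
            log.Fatal(err)
        }
        body, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        if m := re.FindSubmatch(body); m != nil {
            fmt.Printf("request %d -> %s\n", i, m[1])
        }
    }
}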




If you start a container directly with docker run, the labels can be added like this:

docker run -d --label 'traefik.http.routers.whoami.rule=PathPrefix(`/test`)' \
 --label traefik.enable=true \
 --network custom_net \
 --name whoami-whoami traefik/whoami 

Containers started by docker compose are named <project>-<service>-<index>; --name whoami-whoami here makes the docker run container fall into the same group as the whoami service started by docker compose above.

Dynamic configuration via the File provider

The Docker-label way of adding routes to Traefik was covered above; here is the File-based way.
Traefik watches the directory given by the static option providers.file.directory=/traefik/conf; it is mounted out as /root/docker/traefik/conf:/traefik/conf, so just drop new YAML files into conf.

swagger.yaml

http:
  routers:
    s-user:
      service: s-user
      middlewares:
        - "replaceRegex1"
      rule: PathPrefix(`/s-user`)
    s-order:
      service: s-order
      middlewares:
        - "replaceRegex2"
      rule: PathPrefix(`/s-order`)
    s-product:
      service: s-product
      middlewares:
        - "replaceRegex3"
      rule: PathPrefix(`/s-product`)

  middlewares:
    replaceRegex1:
      replacePathRegex:
        regex: "^/s-user/(.*)"
        replacement: "/$1"
    replaceRegex2:
      replacePathRegex:
        regex: "^/s-order/(.*)"
        replacement: "/$1"
    replaceRegex3:
      replacePathRegex:
        regex: "^/s-product/(.*)"
        replacement: "/$1"

  services:
    s-user:
      loadBalancer:
        servers:
        - url: http://swagger-user:8080
    s-order:
      loadBalancer:
        servers:
        - url: http://swagger-order:8080
    s-product:
      loadBalancer:
        servers:
        - url: http://swagger-product:8080

Configuration explained:
routers define the URL matching rules; PathPrefix is prefix matching
middlewares process the URL path; replacePathRegex is the built-in regex rewrite — replaceRegex1, for example, replaces the /s-user/ segment with /
services configure load balancing; swagger-user is the service defined in docker compose, reachable by service name thanks to Docker's built-in DNS

What this configuration achieves: through Traefik, a request to /s-user/xxx on port 80 has its URL rewritten to /xxx and is forwarded to port 8080 of the swagger-user service inside the container (the toy snippet below shows the rewrite in isolation).
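
A tiny Go snippet reproducing what the replaceRegex1 middleware does to a path (Go's regexp happens to use the same $1 capture-group syntax):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same rule as replaceRegex1: ^/s-user/(.*) -> /$1
    re := regexp.MustCompile(`^/s-user/(.*)`)
    fmt.Println(re.ReplaceAllString("/s-user/v3/api-docs", "/$1")) // prints /v3/api-docs
}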

The backends in this example are three swagger-ui containers; each reads a JSON file on startup and renders a Swagger API doc. The specific JSON contents don't matter here — this is just a usage reference.

version: '3'
services:
  swagger-user:
    image: 'swaggerapi/swagger-ui'
    ports:
      - '8083:8080'
    environment:
      SWAGGER_JSON: /swagger/user.json
    volumes:
      - /mnt/d/docker/swagger-json:/swagger
    networks:
      - custom_net

  swagger-order:
    image: 'swaggerapi/swagger-ui'
    ports:
      - '8085:8080'
    environment:
      SWAGGER_JSON: /swagger/order.json
    volumes:
      - /mnt/d/docker/swagger-json:/swagger
    networks:
      - custom_net

  swagger-product:
    image: 'swaggerapi/swagger-ui'
    ports:
      - '8084:8080'
    environment:
      SWAGGER_JSON: /swagger/product.json
    volumes:
      - /mnt/d/docker/swagger-json:/swagger
    networks:
      - custom_net

networks:
  custom_net:
    external: true

Other dynamic providers such as Etcd, Consul, and ZooKeeper use a configuration format similar to File's.
File-based configuration lets routing changes take effect without restarting Traefik or the business services. The watching is built on fsnotify (a minimal sketch follows), which a few environments do not support — e.g. Ubuntu under Windows WSL.
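
For the curious, a minimal fsnotify sketch in Go — the same mechanism Traefik's file provider relies on to pick up changes under providers.file.directory:

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer watcher.Close()

    // Watch the dynamic-config directory (the path Traefik is given).
    if err := watcher.Add("/traefik/conf"); err != nil {
        log.Fatal(err)
    }
    for {
        select {
        case event := <-watcher.Events:
            log.Printf("config change: %s %s", event.Op, event.Name)
        case err := <-watcher.Errors:
            log.Println("watch error:", err)
        }
    }
}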

Using Nginx Proxy Manager

NPM needs a database to store proxy forwarding rules and other data; the console's ability to disable an individual forwarding rule is implemented by staging the configuration in the database. MySQL is used for this first look. docker-compose.yaml:

version: '3'
services:
  nginx-proxy-manager:
    image: 'jc21/nginx-proxy-manager:latest'
    restart: always
    ports:
      # These ports are in format <host-port>:<container-port>
      - '80:80' # Public HTTP Port
      - '443:443' # Public HTTPS Port
      - '81:81' # Admin Web Port
      # Add any other Stream port you want to expose
      # - '21:21' # FTP
    environment:
      # Mysql/Maria connection parameters:
      DB_MYSQL_HOST: mysql
      DB_MYSQL_PORT: 3306
      DB_MYSQL_USER: "npm"
      DB_MYSQL_PASSWORD: "npm"
      DB_MYSQL_NAME: "npm"
      # Uncomment this if IPv6 is not enabled on your host
      # DISABLE_IPV6: 'true'
      TZ: Asia/Shanghai
    volumes:
      - ./data:/data
      - ./letsencrypt:/etc/letsencrypt
    networks:
      - custom_net
    depends_on:
      - mysql

  mysql:
    image: mysql:5.7.16
    container_name: mysql
    restart: always
    ports:
      - "3306:3306"
    volumes:
      - /mnt/d/docker/docker-compose/mysql/data:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: "123456"
      TZ: Asia/Shanghai
    # max memory limit
    deploy:
      resources:
        limits:
          memory: 256M
    networks:
      - custom_net
    privileged: true
    command:
      --server_id=100
      --log-bin=mysql-bin
      --binlog_format=mixed
      --expire_logs_days=7
      --default-authentication-plugin=mysql_native_password
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_general_ci
      --max_allowed_packet=16M
      --sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
      --max_connections=10000
      --slow_query_log=ON
      --long_query_time=1
      --log_queries_not_using_indexes=ON
      --lower_case_table_names=1
      --explicit_defaults_for_timestamp=true

# Reference the pre-created network
networks:
  custom_net:
    external: true

After docker compose up -d, remember to go into MySQL first and create the user, password, and database — all named npm.

These can be set via NPM's environment variables (DB_MYSQL_USER, DB_MYSQL_NAME, etc.). Because the containers share a network, DB_MYSQL_HOST can simply be mysql.

Once it's up, open localhost:81 for the console. On first login you must change the credentials; the defaults are admin@example.com / changeme.
Using whoami as the example, configure a proxy rule that accepts requests with source localhost and URLs starting with /test.

The test environment here is Ubuntu under Windows WSL, hence localhost; on a cloud server, just use its public IP.

Then add a location under Custom locations.

Since multiple whoami containers are already running, http://localhost/test works immediately, and repeated requests round-robin across the instances.

Serving static assets

Since NPM is built on Nginx, it can of course serve static assets — one difference from Traefik, which is more purely a reverse proxy.
The data directory is already mounted out, so create an html directory under data (matching the alias below) and add 123.txt and test.html files.
Then configure static asset access in the console, under: Proxy Host -> Advanced -> Custom Nginx Configuration

location /files {
  alias /data/html/;
  autoindex on;
}

Then visit http://localhost/files/

You can also open http://localhost/files/123.txt directly to view the file contents.

Configuring upstream load balancing

In plain Nginx, an upstream is configured like this:

upstream backend {
    server backend1.example.com:8080;
    server backend2.example.com:8080;
}
server {
    location / {
        proxy_pass http://backend;
    }
}

You can't configure an upstream directly in an NPM console location, but NPM exposes custom-configuration hooks. Per the NPM official docs (advanced configuration), the custom config path is /data/nginx/custom; for the http block, either of the following two files can be used — pick one:

/data/nginx/custom/http_top.conf # included at the top of the main http block
/data/nginx/custom/http.conf # included at the end of the main http block
  1. Under the mounted data/nginx path, create a custom directory and add an http.conf file inside it:

    upstream whoami-upstream {
        server 172.19.0.3:80;
        server 172.19.0.4:80;
        server 172.19.0.5:80;
    }

    The whoami container IPs are configured directly here, for easy testing.

  2. In the console's Advanced configuration, define a custom location:

    location /test-lb {
        proxy_pass http://whoami-upstream;
    }


    Then hit http://localhost/test-lb to test; requests still round-robin across the whoami instances.

How NPM works

Since NPM is built on Nginx, finding Nginx's default configuration reveals how it works — at heart, NPM is a manager of Nginx config files. Run docker compose logs; NPM's startup log prints the default config paths:

nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/production.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/ssl-ciphers.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/proxy.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/force-ssl.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/block-exploits.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/assets.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/letsencrypt-acme-challenge.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/ip_ranges.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/include/resolvers.conf
nginx-proxy-manager-nginx-proxy-manager-1  | - /etc/nginx/conf.d/default.conf

Run docker compose exec nginx-proxy-manager bash to enter the container and inspect the corresponding conf files.
default.conf listens on ports 80 and 443 by default (handling proxied requests); production.conf listens on port 81 (handling console requests).

The main nginx configuration file

Path: /etc/nginx/nginx.conf. Contents:

# run nginx in foreground
daemon off;
pid /run/nginx/nginx.pid;
user npmuser;

# Set number of worker processes automatically based on number of CPU cores.
worker_processes auto;

# Enables the use of JIT for regular expressions to speed-up their processing.
pcre_jit on;

error_log /data/logs/fallback_error.log warn;

# Includes files with directives to load dynamic modules.
include /etc/nginx/modules/*.conf;

events {
        include /data/nginx/custom/events[.]conf;
}

http {
        include                       /etc/nginx/mime.types;
        default_type                  application/octet-stream;
        sendfile                      on;
        server_tokens                 off;
        tcp_nopush                    on;
        tcp_nodelay                   on;
        client_body_temp_path         /tmp/nginx/body 1 2;
        keepalive_timeout             90s;
        proxy_connect_timeout         90s;
        proxy_send_timeout            90s;
        proxy_read_timeout            90s;
        ssl_prefer_server_ciphers     on;
        gzip                          on;
        proxy_ignore_client_abort     off;
        client_max_body_size          2000m;
        server_names_hash_bucket_size 1024;
        proxy_http_version            1.1;
        proxy_set_header              X-Forwarded-Scheme $scheme;
        proxy_set_header              X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header              Accept-Encoding "";
        proxy_cache                   off;
        proxy_cache_path              /var/lib/nginx/cache/public  levels=1:2 keys_zone=public-cache:30m max_size=192m;
        proxy_cache_path              /var/lib/nginx/cache/private levels=1:2 keys_zone=private-cache:5m max_size=1024m;

        log_format proxy '[$time_local] $upstream_cache_status $upstream_status $status - $request_method $scheme $host "$request_uri" [Client $remote_addr] [Length $body_bytes_sent] [Gzip $gzip_ratio] [Sent-to $server] "$http_user_agent" "$http_referer"';
        log_format standard '[$time_local] $status - $request_method $scheme $host "$request_uri" [Client $remote_addr] [Length $body_bytes_sent] [Gzip $gzip_ratio] "$http_user_agent" "$http_referer"';

        access_log /data/logs/fallback_access.log proxy;

        # Dynamically generated resolvers file
        include /etc/nginx/conf.d/include/resolvers.conf;

        # Default upstream scheme
        map $host $forward_scheme {
                default http;
        }

        # Real IP Determination

        # Local subnets:
        set_real_ip_from 10.0.0.0/8;
        set_real_ip_from 172.16.0.0/12; # Includes Docker subnet
        set_real_ip_from 192.168.0.0/16;
        # NPM generated CDN ip ranges:
        include conf.d/include/ip_ranges.conf;
        # always put the following 2 lines after ip subnets:
        real_ip_header X-Real-IP;
        real_ip_recursive on;

        # Custom
        include /data/nginx/custom/http_top[.]conf;

        # Files generated by NPM
        include /etc/nginx/conf.d/*.conf;
        include /data/nginx/default_host/*.conf;
        include /data/nginx/proxy_host/*.conf;
        include /data/nginx/redirection_host/*.conf;
        include /data/nginx/dead_host/*.conf;
        include /data/nginx/temp/*.conf;

        # Custom
        include /data/nginx/custom/http[.]conf;
}

stream {
        # Files generated by NPM
        include /data/nginx/stream/*.conf;

        # Custom
        include /data/nginx/custom/stream[.]conf;
}

# Custom
include /data/nginx/custom/root[.]conf;

The include order shows that, inside the http block, it first includes /data/nginx/custom/http_top[.]conf,
then /data/nginx/proxy_host/*.conf — the configuration written by the console lives under this data/nginx directory —
and finally /data/nginx/custom/http[.]conf. This matches what the official docs state:

/data/nginx/custom/http_top.conf # included at the top of the main http block
/data/nginx/custom/http.conf # included at the end of the main http block

Custom configuration

Taking the proxy config as an example: the data/nginx/proxy_host directory contains a 1.conf whose contents correspond to our custom configuration:

# ------------------------------------------------------------
# localhost
# ------------------------------------------------------------
server {
  set $forward_scheme http;
  set $server         "127.0.0.1";
  set $port           80;

  listen 80;
  listen [::]:80;

  server_name localhost;


  location /files {
    alias /data/html/;
    autoindex on;
  }
  location /test-lb {
    proxy_pass http://whoami-upstream;
  }

  location /test {
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-Scheme $scheme;
    proxy_set_header X-Forwarded-Proto  $scheme;
    proxy_set_header X-Forwarded-For    $remote_addr;
    proxy_set_header X-Real-IP      $remote_addr;
    proxy_pass       http://whoami:80;

  # Block Exploits
  include conf.d/include/block-exploits.conf;
  }

  location / {
    # Proxy!
    include conf.d/include/proxy.conf;
  }
  # Custom
  include /data/nginx/custom/server_proxy[.]conf;
}

As you can see, it merges the Proxy Host's Custom locations and Advanced configuration into one server block.
That wraps up this first look at Traefik and Nginx Proxy Manager~
Reprinted from: https://juejin.cn/post/7222577873792827451

Nginx Proxy Manager

Get connected

Expose web services on your network · Free SSL with Let's Encrypt · Designed with security in mind · Perfect for home networks

Proxy hosts

Expose your private network web services and connect from anywhere.

Beautiful UI

Based on Tabler, the interface is a pleasure to use. Configuring a server has never been this fun.

Free SSL

Built-in Let's Encrypt support lets you secure your web services at no cost. Certificates even renew themselves!

Docker FTW

Nginx Proxy Manager ships as a Docker image and only needs a database.

Multiple users

Configure other users to view or manage their own hosts, with full access permissions available.

Official site: https://nginxproxymanager.com/
Example use: https://docs.halo.run/getting-started/install/other/nginxproxymanager/
Chinese version: https://github.com/xiaoxinpro/nginx-proxy-manager-zh
Load balancing: https://www.xiaocaicai.com/2023/09/nginxproxymanager-%E9%85%8D%E7%BD%AEupstream-%E8%B4%9F%E8%BD%BD%E5%9D%87%E8%A1%A1/
FAQ (502 Bad Gateway): https://www.xiaocaicai.com/2024/09/npm%e5%8d%a1%e7%99%bb%e5%bd%95%e9%a1%b5%e9%9d%a2%ef%bc%8c%e6%98%be%e7%a4%babad-gateway/