Skip to content

Commit 0e399dc

Browse files
committed
Fix rebase issues
Signed-off-by: Hao Lu <[email protected]>
1 parent 74828f2 commit 0e399dc

38 files changed

+99
-1000
lines changed

cpp/tensorrt_llm/kernels/quantization.cu

+26
Original file line numberDiff line numberDiff line change
@@ -277,6 +277,32 @@ void invokeBatchedFP4Quantization(int b, int m, int n, __nv_fp8_e4m3 const* inpu
277277
}
278278
#endif
279279

280+
// Interleaves NVFP4 block scale factors (SF) from a row-major
// [numbatches, numRows, numCols] byte layout into the 128x4 tiled layout
// produced by get_sf_out_offset_128x4 (one SF byte per 16 elements; the
// `numCols * 16` argument converts the SF column count back to element count).
//
// Launch shape: grid-stride over rows (blockIdx.x), block-stride over SF
// columns (threadIdx.x); each thread copies one scale-factor byte per
// iteration. The host wrapper notes this is intended for weight loading
// (m and n large, b <= 256), so index math must be 64-bit safe.
//
// NOTE: std::optional in device code relies on --expt-relaxed-constexpr
// (project-wide in TensorRT-LLM); cuda::std::optional would be the
// dependency-free alternative.
__global__ void nvfp4_block_scale_interleave_kernel(
    int numbatches, int numRows, int numCols, uint8_t const* SFIn, uint8_t* SFOutput)
{
    for (int rowIdx = blockIdx.x; rowIdx < numRows; rowIdx += gridDim.x)
    {
        for (int batchIdx = 0; batchIdx < numbatches; batchIdx++)
        {
            for (int colIdx = threadIdx.x; colIdx < numCols; colIdx += blockDim.x)
            {
                // Promote to 64-bit BEFORE multiplying: the original
                // `batchIdx * numRows * numCols + rowIdx * numCols + colIdx`
                // is evaluated entirely in 32-bit int and silently overflows
                // for large weight tensors before being widened to int64_t.
                int64_t inOffset = static_cast<int64_t>(batchIdx) * numRows * numCols
                    + static_cast<int64_t>(rowIdx) * numCols + colIdx;
                auto sf = SFIn[inOffset];

                std::optional<int> batchIdxOpt = batchIdx;
                std::optional<int> numRowsOpt = numRows;

                // Without batching, the math in get_sf_out_offset is the same as
                // int const numSfTilesK = (numCols + 4 - 1) / 4;
                // int const tileOffset = ((mi / 128) * numSfTilesK + ki / 4) * 512;
                // int const dstIdx = tileOffset + (mi % 32) * 16 + ((mi % 128) / 32) * 4 + ki % 4;
                auto dstIdx = get_sf_out_offset_128x4(batchIdxOpt, rowIdx, colIdx, numRowsOpt, numCols * 16);
                SFOutput[dstIdx] = sf;
            }
        }
    }
}
305+
280306
// This is intended for weight loading, so m and n are large, b <= 256
281307
void invokeNVFP4BlockScaleInterleave(
282308
int b, int m, int n, uint8_t const* SFIn, uint8_t* SFOutput, int multiProcessorCount, cudaStream_t stream)

cpp/tensorrt_llm/kernels/quantization.cuh

+2-25
Original file line numberDiff line numberDiff line change
@@ -826,30 +826,7 @@ cvt_fp8_to_fp4(
826826
#endif
827827
}
828828

829-
inline __global__ void nvfp4_block_scale_interleave_kernel(
830-
int numbatches, int numRows, int numCols, uint8_t const* SFIn, uint8_t* SFOutput)
831-
{
832-
for (int rowIdx = blockIdx.x; rowIdx < numRows; rowIdx += gridDim.x)
833-
{
834-
for (int batchIdx = 0; batchIdx < numbatches; batchIdx++)
835-
{
836-
for (int colIdx = threadIdx.x; colIdx < numCols; colIdx += blockDim.x)
837-
{
838-
int64_t inOffset = batchIdx * numRows * numCols + rowIdx * numCols + colIdx;
839-
auto sf = SFIn[inOffset];
840-
841-
std::optional<int> batchIdxOpt = batchIdx;
842-
std::optional<int> numRowsOpt = numRows;
843-
844-
// Without batching, the math in get_sf_out_offset is the same as
845-
// int const numSfTilesK = (numCols + 4 - 1) / 4;
846-
// int const tileOffset = ((mi / 128) * numSfTilesK + ki / 4) * 512;
847-
// int const dstIdx = tileOffset + (mi % 32) * 16 + ((mi % 128) / 32) * 4 + ki % 4;
848-
auto dstIdx = get_sf_out_offset_128x4(batchIdxOpt, rowIdx, colIdx, numRowsOpt, numCols * 16);
849-
SFOutput[dstIdx] = sf;
850-
}
851-
}
852-
}
853-
}
829+
__global__ void nvfp4_block_scale_interleave_kernel(
830+
int numbatches, int numRows, int numCols, uint8_t const* SFIn, uint8_t* SFOutput);
854831
} // namespace kernels
855832
} // namespace tensorrt_llm

cpp/tensorrt_llm/kernels/trtllmGenKernels/CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,4 +17,4 @@
1717

1818
add_subdirectory(fmha)
1919
add_subdirectory(blockscaleGemm)
20-
add_subdirectory(fp8BlockScaleMoe)
20+
add_subdirectory(blockScaleMoe)

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/gemmCommon.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/gemmCommon.h

+4-4
Original file line numberDiff line numberDiff line change
@@ -190,8 +190,8 @@ inline void checkAndUpdateGemmOptions(
190190
}
191191
if (options.mDtypeElt == tg::Dtype::E4m3 && options.mMmaK != 32)
192192
{
193-
TLLM_LOG_WARNING(
194-
"Unsupported MmaK (", options.mMmaK, ") for ", gemm::toString(options.mDtypeElt), ". Setting MmaK to 32");
193+
TLLM_LOG_WARNING("Unsupported MmaK (", options.mMmaK, ") for ", gemm::toString(options.mDtypeElt).c_str(),
194+
". Setting MmaK to 32");
195195
options.mMmaK = 32;
196196
options.mTileK = std::max(options.mMmaK, options.mTileK);
197197
}
@@ -205,15 +205,15 @@ inline void checkAndUpdateGemmOptions(
205205
if (options.mMmaK != 64)
206206
{
207207
int newTileK = 64 * divUp(options.mTileK, 64);
208-
TLLM_LOG_WARNING("Unsupported MmaK (", options.mMmaK, ") for ", gemm::toString(options.mDtypeElt),
208+
TLLM_LOG_WARNING("Unsupported MmaK (", options.mMmaK, ") for ", gemm::toString(options.mDtypeElt).c_str(),
209209
". Setting MmaK to 64 and TileK to ", newTileK);
210210
options.mMmaK = 64;
211211
options.mTileK = newTileK;
212212
}
213213
if (options.mMmaM != 128)
214214
{
215215
int newTileM = 128 * divUp(options.mTileM, 128);
216-
TLLM_LOG_WARNING("Unsupported MmaM (", options.mMmaM, ") for ", gemm::toString(options.mDtypeElt),
216+
TLLM_LOG_WARNING("Unsupported MmaM (", options.mMmaM, ") for ", gemm::toString(options.mDtypeElt).c_str(),
217217
". Setting MmaM to 128 and TileM to ", newTileM);
218218
options.mMmaM = 128;
219219
options.mTileM = newTileM;

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/runner.cu renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/runner.cu

+1-115
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
1818
#include "gemmList.h"
1919
#include "runner.h"
2020
#include "trtllmGenSrc/DevKernel.h"
21-
#include "trtllmGenSrc/MixtureOfExpertsInterface.h"
2221
#include "trtllmGenSrc/RoutingKernel.h"
2322
#include <iostream>
2423

@@ -135,43 +134,6 @@ void Runner::run(void* hidden_state, void* hidden_state_scale, void* weight, voi
135134
TLLM_CHECK_WITH_INFO(selectedIndex.size() == 1, "Multiple kernels found for the given element type");
136135
auto const& kernelInfo = gemmList[*selectedIndex.begin()];
137136

138-
// TODO: remove this once we find the way to shuffle the weights offline
139-
// if (kernelInfo.shuffledMatrixA || kernelInfo.useFusedAct)
140-
// {
141-
// // Allocate temporary buffers for shuffled weights using vectors
142-
// auto numBitsPerElt = trtllm::gen::dtypeGetNumBits(mDtypeElt);
143-
// const size_t numBytesA = num_experts * hidden_size * intermediate_size * 2 * numBitsPerElt / /* bits */ 8;
144-
// std::vector<uint8_t> hShuffledA(numBytesA);
145-
146-
// auto numBitsPerSf = trtllm::gen::dtypeGetNumBits(tg::dtypeBlockSfType(mDtypeElt));
147-
// const size_t numSfBytes = num_experts * hidden_size * intermediate_size / 16 * 2 * numBitsPerSf / /* bits */
148-
// 8; std::vector<uint8_t> hShuffledASf(numSfBytes);
149-
150-
// // Copy weights to host
151-
// cudaMemcpy(hShuffledA.data(), weight, numBytesA, cudaMemcpyDeviceToHost);
152-
// cudaMemcpy(hShuffledASf.data(), weight_scale, numSfBytes, cudaMemcpyDeviceToHost);
153-
154-
// // Prepare and shuffle the weights
155-
// prepareBatchWeightsOnHost(hShuffledA.data(), // wIn
156-
// hShuffledASf.data(), // wSfIn
157-
// hShuffledA.data(), // wOut (in-place)
158-
// hShuffledASf.data(), // wSfOut (in-place)
159-
// mDtypeElt, // dtypeElt
160-
// intermediate_size * 2, // m (2x for gated activation)
161-
// hidden_size, // k
162-
// kernelInfo.epilogueTileM, // epilogueTileM (from tileN)
163-
// num_experts, // numBatches
164-
// kernelInfo.shuffledMatrixA, // useShuffleMatrix
165-
// kernelInfo.useFusedAct, // useFusedAct (for gated activation)
166-
// mDtypeElt == tg::Dtype::E2m1, // useBlockScaling
167-
// 16 // numEltsPerSf (for E2m1)
168-
// );
169-
170-
// // Copy shuffled weights back to device
171-
// // cudaMemcpy(weight, hShuffledA.data(), numBytesA, cudaMemcpyHostToDevice);
172-
// // cudaMemcpy(weight_scale, hShuffledASf.data(), numSfBytes, cudaMemcpyHostToDevice);
173-
// }
174-
175137
gemmCommon::MyOptions options;
176138
options.mTopK = top_k;
177139
options.mBatchM = false;
@@ -239,43 +201,6 @@ void Runner::run(void* permuted_hidden_state, void* permuted_hidden_state_scale,
239201
TLLM_CHECK_WITH_INFO(selectedIndex.size() == 1, "Multiple kernels found for the given element and output types");
240202
auto const& kernelInfo = gemmList[*selectedIndex.begin()];
241203

242-
// TODO: remove this once we find the way to shuffle the weights offline
243-
// if (kernelInfo.shuffledMatrixA)
244-
// {
245-
// // Allocate temporary buffers for shuffled weights using vectors
246-
// auto numBitsPerElt = trtllm::gen::dtypeGetNumBits(mDtypeElt);
247-
// const size_t numBytesA = num_experts * hidden_size * intermediate_size * numBitsPerElt / /* bits */ 8;
248-
// std::vector<uint8_t> hShuffledA(numBytesA);
249-
250-
// auto numBitsPerSf = trtllm::gen::dtypeGetNumBits(tg::dtypeBlockSfType(mDtypeElt));
251-
// const size_t numSfBytes = num_experts * hidden_size * intermediate_size / 16 * numBitsPerSf / /* bits */ 8;
252-
// std::vector<uint8_t> hShuffledASf(numSfBytes);
253-
254-
// // Copy weights to host
255-
// cudaMemcpy(hShuffledA.data(), weight, numBytesA, cudaMemcpyDeviceToHost);
256-
// cudaMemcpy(hShuffledASf.data(), weight_scale, numSfBytes, cudaMemcpyDeviceToHost);
257-
258-
// // Prepare and shuffle the weights
259-
// prepareBatchWeightsOnHost(hShuffledA.data(), // wIn
260-
// hShuffledASf.data(), // wSfIn
261-
// hShuffledA.data(), // wOut (in-place)
262-
// hShuffledASf.data(), // wSfOut (in-place)
263-
// mDtypeElt, // dtypeElt
264-
// hidden_size, // m
265-
// intermediate_size, // k
266-
// kernelInfo.epilogueTileM, // epilogueTileM (from tileN)
267-
// num_experts, // numBatches
268-
// kernelInfo.shuffledMatrixA, // useShuffleMatrix
269-
// false, // useFusedAct (for gated activation)
270-
// mDtypeElt == tg::Dtype::E2m1, // useBlockScaling
271-
// 16 // numEltsPerSf (for E2m1)
272-
// );
273-
274-
// // Copy shuffled weights back to device
275-
// // cudaMemcpy(weight, hShuffledA.data(), numBytesA, cudaMemcpyHostToDevice);
276-
// // cudaMemcpy(weight_scale, hShuffledASf.data(), numSfBytes, cudaMemcpyHostToDevice);
277-
// }
278-
279204
gemmCommon::MyOptions options;
280205
options.mTopK = top_k;
281206
options.mBatchM = false;
@@ -373,16 +298,7 @@ void Runner::run(MoERunnerArgs const& args, MoEWorkspace const& workspace, cudaS
373298

374299
setOpsData(args, workspace, convertSfData, activationData, finalizeData);
375300

376-
// Calling routing outside to properly allocate workspace
377-
// moe::dev::routing::run(routingData, stream);
378-
379301
void* hidden_states_scale_linear{args.hidden_states_scale};
380-
// FIXME check that we receive r128c4 sf layout
381-
// if (args.mDtypeElt == tg::Dtype::E2m1)
382-
// {
383-
// hidden_states_scale_linear = workspace.hidden_states_scale_linear;
384-
// moe::dev::convertsf::run(convertSfData, stream);
385-
// }
386302

387303
PermuteGemm1::Runner permuteGemm1(args.mDtypeElt);
388304
permuteGemm1.run(args.hidden_states, hidden_states_scale_linear, args.gemm1_weights, args.gemm1_weights_scale,
@@ -411,36 +327,6 @@ void Runner::run(MoERunnerArgs const& args, MoEWorkspace const& workspace, cudaS
411327

412328
// Run finalize
413329
moe::dev::finalize::run(finalizeData, stream);
414-
415-
// std::vector<uint8_t> gemm1_output_fp8(64 * args.intermediate_size / 2);
416-
// printf("array addr 0x%lx\n", &gemm1_output_fp8[0]);
417-
// printf("local_num_experts addr 0x%lx\n", &args.local_num_experts);
418-
// cudaMemcpy(gemm1_output_fp8.data(), workspace.gemm1_output, gemm1_output_fp8.size() * sizeof(uint8_t),
419-
// cudaMemcpyDeviceToHost);
420-
// std::cout << "args.local_num_experts: " << args.local_num_experts << std::endl;
421-
// std::cout << "gemm1 output (hex):" << std::endl;
422-
// for (int offset = 0; offset < 8; offset++)
423-
// {
424-
// int base = offset * 2048;
425-
// for (int i = 0; i < args.num_tokens; i++)
426-
// {
427-
// for (int j = 0; j < args.intermediate_size / 2; j += 16)
428-
// {
429-
// std::cout << "Token " << i << " [" << std::dec << base + j << "]: ";
430-
// for (int k = 0; k < 16 && (j + k) < args.intermediate_size / 2; k++)
431-
// {
432-
// // std::cout << "offset: " << std::dec << base + i * args.intermediate_size / 2 + j + k <<
433-
// // std::endl;
434-
// std::cout << "0x" << std::hex << std::setw(2) << std::setfill('0')
435-
// << static_cast<uint>(gemm1_output_fp8[base + i * args.intermediate_size / 2 + j + k])
436-
// << " ";
437-
// }
438-
// std::cout << std::endl;
439-
// }
440-
// std::cout << std::endl;
441-
// }
442-
// }
443-
// std::cout << "args.local_num_experts: " << args.local_num_experts << std::endl;
444330
}
445331
} // namespace MoE
446332

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/runner.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/runner.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/DevKernel.cu renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/DevKernel.cu

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/DevKernel.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/DevKernel.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/Dtype.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/Dtype.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.
@@ -267,7 +267,7 @@ inline Dtype dtypeFromString(std::string const& str)
267267
}
268268
else
269269
{
270-
TLLM_LOG_ERROR("Unknown Dtype ", str);
270+
TLLM_LOG_ERROR("Unknown Dtype ", str.c_str());
271271
}
272272
return Dtype::Void;
273273
}

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/DtypeDecl.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/DtypeDecl.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/Enums.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/Enums.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/KernelParams.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/KernelParams.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/KernelTraits.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/KernelTraits.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/RoutingKernel.cu renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/RoutingKernel.cu

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/RoutingKernel.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/RoutingKernel.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
44
* Licensed under the Apache License, Version 2.0 (the "License");
55
* you may not use this file except in compliance with the License.

cpp/tensorrt_llm/kernels/trtllmGenKernels/fp8BlockScaleMoe/trtllmGenSrc/SfLayout.h renamed to cpp/tensorrt_llm/kernels/trtllmGenKernels/blockScaleMoe/trtllmGenSrc/SfLayout.h

+13-13
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,19 @@
1-
/***************************************************************************************************
2-
* Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
1+
/*
2+
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
33
*
4-
* Redistribution and use in source and binary forms, with or without modification, are not permit-
5-
* ted.
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
67
*
7-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
8-
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
9-
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
10-
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
11-
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
12-
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
13-
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
14-
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8+
* http://www.apache.org/licenses/LICENSE-2.0
159
*
16-
**************************************************************************************************/
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
1717
#pragma once
1818

1919
#include "SfLayoutDecl.h"

0 commit comments

Comments
 (0)