NIS_Scaler.h

// The MIT License (MIT)
//
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//---------------------------------------------------------------------------------
// NVIDIA Image Scaling SDK - v1.0
//---------------------------------------------------------------------------------
// The NVIDIA Image Scaling SDK provides a single spatial scaling and sharpening algorithm
// for cross-platform support. The scaling algorithm uses a 6-tap scaling filter combined
// with 4 directional scaling and adaptive sharpening filters, which produces smooth images
// with sharp edges. In addition, the SDK provides a state-of-the-art adaptive directional
// sharpening algorithm for use in applications where no scaling is required.
//
// The directional scaling and sharpening algorithm is named NVScaler, while the
// adaptive-directional-sharpening-only algorithm is named NVSharpen. Both algorithms are
// provided as compute shaders and developers are free to integrate them in their
// applications. Note that if you integrate NVScaler you should NOT also integrate
// NVSharpen, as NVScaler already includes a sharpening pass.
//
// Pipeline Placement
// ------------------
// The call into the NVIDIA Image Scaling shaders must occur during the post-processing
// phase, after tone mapping. Applying the scaling in linear HDR in-game color-space may
// result in a sharpening effect that is either not visible or too strong. Since sharpening
// algorithms can enhance noisy or grainy regions, effects such as film grain should be
// applied after NVScaler or NVSharpen. Low-pass filters such as motion blur or light bloom
// should be applied before NVScaler or NVSharpen to avoid attenuating the sharpening.
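//
// One possible pass ordering consistent with the guidance above (illustrative only):
//
//   motion blur / bloom -> tone mapping -> NVScaler (or NVSharpen) -> film grain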
//
// Color Space and Ranges
// ----------------------
// NVIDIA Image Scaling shaders can process color textures stored as either LDR or HDR with
// the following restrictions:
// 1) LDR
//    - The range of color values must be in the [0, 1] range
//    - The input color texture must be in display-referred color-space after tone mapping
//      and OETF (gamma-correction) has been applied
// 2) HDR PQ
//    - The range of color values must be in the [0, 1] range
//    - The input color texture must be in display-referred color-space after tone mapping
//      with Rec.2020 PQ OETF applied
// 3) HDR Linear
//    - The recommended range of color values is [0, 12.5], where a luminance value (as per
//      BT.709) of 1.0 maps to a brightness of 80 nits (sRGB peak) and 12.5 maps to 1000 nits
//    - The input color texture may have luminance values that are either linear and
//      scene-referred or linear and display-referred (after tone mapping)
//
// If the input color texture sent to NVScaler/NVSharpen is in an HDR format, set the
// NIS_HDR_MODE define to either NIS_HDR_MODE_LINEAR (1) or NIS_HDR_MODE_PQ (2).
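//
// For example (illustrative), for a linear-HDR input define the mode before including
// this header:
//
//   #define NIS_HDR_MODE NIS_HDR_MODE_LINEAR
//   #include "NIS_Scaler.h"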
//
// Supported Texture Formats
// -------------------------
// Input and output formats:
// Input and output formats are expected to be in the ranges defined in the previous section
// and should be specified using non-integer data types such as DXGI_FORMAT_R8G8B8A8_UNORM.
//
// Coefficient formats:
// The scaler coefficients and USM coefficients should be specified using a float4 format
// such as DXGI_FORMAT_R32G32B32A32_FLOAT or DXGI_FORMAT_R16G16B16A16_FLOAT.
//
// Resource States, Buffers, and Sampler:
// The game or application calling NVIDIA Image Scaling SDK shaders must ensure that the
// textures are in the correct state.
// - Input color textures must be in pixel shader read state: Shader Resource View (SRV) in DirectX
// - The output texture must be in read/write state: Unordered Access View (UAV) in DirectX
// - The coefficients texture for NVScaler must be in read state: Shader Resource View (SRV) in DirectX
// - The configuration variables must be passed as a constant buffer: Constant Buffer View (CBV) in DirectX
// - The sampler for texture pixel sampling must be a linear-clamp SamplerState in DirectX
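//
// A minimal sketch of the HLSL resource declarations this header expects from the
// including shader (the names match those referenced below; the register assignments
// are illustrative, not mandated):
//
//   SamplerState samplerLinearClamp : register(s0);
//   cbuffer cb : register(b0)
//   {
//       // NIS configuration constants: kScaleX, kScaleY, kSrcNormX, kSrcNormY,
//       // kDstNormX, kDstNormY, kDetectRatio, kDetectThres, sharpening constants, ...
//   };
//   Texture2D in_texture                  : register(t0);
//   Texture2D coef_scaler                 : register(t1); // NVScaler only
//   Texture2D coef_usm                    : register(t2); // NVScaler only
//   RWTexture2D<unorm float4> out_texture : register(u0);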
//
// Adding NVIDIA Image Scaling SDK to a Project
// --------------------------------------------
// Include NIS_Scaler.h directly in your application or, alternatively, use the provided
// NIS_Main.hlsl shader file. Use NIS_Config.h to get the ideal shader dispatch values for
// your platform, to configure the algorithm constant values (NVScalerUpdateConfig and
// NVSharpenUpdateConfig), and to access the algorithm coefficients (coef_scale and coef_USM).
// See the entry-point sketch at the end of this comment block.
//
// Defines:
// NIS_SCALER: default (1) NVScaler, (0) fast NVSharpen only, no upscaling
// NIS_HDR_MODE: default (0) disabled, (1) Linear, (2) PQ
// NIS_BLOCK_WIDTH: pixels per block width. Use the GetOptimalBlockWidth query for your platform
// NIS_BLOCK_HEIGHT: pixels per block height. Use the GetOptimalBlockHeight query for your platform
// NIS_THREAD_GROUP_SIZE: number of threads per group. Use the GetOptimalThreadGroupSize query for your platform
// NIS_USE_HALF_PRECISION: default (0) disabled, (1) enables half-precision computation
// NIS_HLSL: (1) enabled, (0) disabled
// NIS_HLSL_6_2: default (0) HLSL v5, (1) HLSL v6.2; forces NIS_HLSL=1
// NIS_GLSL: (1) enabled, (0) disabled
// NIS_VIEWPORT_SUPPORT: default (0) disabled, (1) enables input/output viewport support
//
// Default NVScaler shader constants:
// [NIS_BLOCK_WIDTH, NIS_BLOCK_HEIGHT, NIS_THREAD_GROUP_SIZE] = [32, 24, 256]
//
// Default NVSharpen shader constants:
// [NIS_BLOCK_WIDTH, NIS_BLOCK_HEIGHT, NIS_THREAD_GROUP_SIZE] = [32, 32, 256]
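//
// A minimal sketch of a compute entry point, patterned after NIS_Main.hlsl (the
// [numthreads] layout and semantics shown here are illustrative):
//
//   [numthreads(NIS_THREAD_GROUP_SIZE, 1, 1)]
//   void main(uint3 blockIdx : SV_GroupID, uint3 threadIdx : SV_GroupThreadID)
//   {
//   #if NIS_SCALER
//       NVScaler(blockIdx.xy, threadIdx.x);
//   #else
//       NVSharpen(blockIdx.xy, threadIdx.x);
//   #endif
//   }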
//---------------------------------------------------------------------------------
// NVScaler is enabled by default. Set to 0 for NVSharpen only
#ifndef NIS_SCALER
#define NIS_SCALER 1
#endif
// HDR Modes
#define NIS_HDR_MODE_NONE 0
#define NIS_HDR_MODE_LINEAR 1
#define NIS_HDR_MODE_PQ 2
#ifndef NIS_HDR_MODE
#define NIS_HDR_MODE NIS_HDR_MODE_NONE
#endif
// 1/sqrt(12.5): compresses the linear-HDR luma range [0, 12.5] into [0, 1] after sqrt
#define kHDRCompressionFactor 0.282842712f
// Viewport support
#ifndef NIS_VIEWPORT_SUPPORT
#define NIS_VIEWPORT_SUPPORT 0
#endif
// HLSL, GLSL: default to GLSL unless HLSL was requested (NIS_HLSL_6_2 implies NIS_HLSL)
#if NIS_HLSL==0 && !defined(NIS_GLSL)
#define NIS_GLSL 1
#endif
#if NIS_HLSL_6_2 || (!NIS_GLSL && !NIS_HLSL)
#if defined(NIS_HLSL)
#undef NIS_HLSL
#endif
#define NIS_HLSL 1
#endif
#if NIS_HLSL && NIS_GLSL
#undef NIS_GLSL
#define NIS_GLSL 0
#endif
// Half precision
#ifndef NIS_USE_HALF_PRECISION
#define NIS_USE_HALF_PRECISION 0
#endif
#if NIS_HLSL
// Generic type and function aliases for HLSL
#define NVF float
#define NVF2 float2
#define NVF3 float3
#define NVF4 float4
#define NVI int
#define NVI2 int2
#define NVU uint
#define NVU2 uint2
#if NIS_USE_HALF_PRECISION
#if NIS_HLSL_6_2
#define NVH float16_t
#define NVH2 float16_t2
#define NVH3 float16_t3
#define NVH4 float16_t4
#else
#define NVH min16float
#define NVH2 min16float2
#define NVH3 min16float3
#define NVH4 min16float4
#endif // NIS_HLSL_6_2
#else // FP32 types
#define NVH NVF
#define NVH2 NVF2
#define NVH3 NVF3
#define NVH4 NVF4
#endif // NIS_USE_HALF_PRECISION
#define NVSHARED groupshared
#define NVTEX_LOAD(x, pos) x[pos]
#define NVTEX_SAMPLE(x, sampler, pos) x.SampleLevel(sampler, pos, 0)
#define NVTEX_SAMPLE_RED(x, sampler, pos) x.GatherRed(sampler, pos)
#define NVTEX_SAMPLE_GREEN(x, sampler, pos) x.GatherGreen(sampler, pos)
#define NVTEX_SAMPLE_BLUE(x, sampler, pos) x.GatherBlue(sampler, pos)
#define NVTEX_STORE(x, pos, v) x[pos] = v
#ifndef NIS_UNROLL
#define NIS_UNROLL [unroll]
#endif
#endif // NIS_HLSL
// Generic type and function aliases for GLSL
#if NIS_GLSL
#define NVF float
#define NVF2 vec2
#define NVF3 vec3
#define NVF4 vec4
#define NVI int
#define NVI2 ivec2
#define NVU uint
#define NVU2 uvec2
#if NIS_USE_HALF_PRECISION
#define NVH float16_t
#define NVH2 f16vec2
#define NVH3 f16vec3
#define NVH4 f16vec4
#else // FP32 types
#define NVH NVF
#define NVH2 NVF2
#define NVH3 NVF3
#define NVH4 NVF4
#endif // NIS_USE_HALF_PRECISION
#define NVSHARED shared
#define NVTEX_LOAD(x, pos) texelFetch(sampler2D(x, samplerLinearClamp), pos, 0)
#define NVTEX_SAMPLE(x, sampler, pos) textureLod(sampler2D(x, sampler), pos, 0)
#define NVTEX_SAMPLE_RED(x, sampler, pos) textureGather(sampler2D(x, sampler), pos, 0)
#define NVTEX_SAMPLE_GREEN(x, sampler, pos) textureGather(sampler2D(x, sampler), pos, 1)
#define NVTEX_SAMPLE_BLUE(x, sampler, pos) textureGather(sampler2D(x, sampler), pos, 2)
#define NVTEX_STORE(x, pos, v) imageStore(x, NVI2(pos), v)
#define saturate(x) clamp(x, 0, 1)
#define lerp(a, b, x) mix(a, b, x)
#define GroupMemoryBarrierWithGroupSync() groupMemoryBarrier(); barrier()
#ifndef NIS_UNROLL
#define NIS_UNROLL
#endif
#endif // NIS_GLSL
// Texture gather
#ifndef NIS_TEXTURE_GATHER
#define NIS_TEXTURE_GATHER 0
#endif
// NIS Scaling
#define NIS_SCALE_INT 1
#define NIS_SCALE_FLOAT NVF(1.f)
NVF getY(NVF3 rgba)
{
#if NIS_HDR_MODE == NIS_HDR_MODE_PQ
    // Rec.2020 luma weights
    return NVF(0.262f) * rgba.x + NVF(0.678f) * rgba.y + NVF(0.0593f) * rgba.z;
#elif NIS_HDR_MODE == NIS_HDR_MODE_LINEAR
    // BT.709 luma, sqrt-compressed into [0, 1] (see kHDRCompressionFactor)
    return sqrt(NVF(0.2126f) * rgba.x + NVF(0.7152f) * rgba.y + NVF(0.0722f) * rgba.z) * kHDRCompressionFactor;
#else
    // BT.709 luma weights
    return NVF(0.2126f) * rgba.x + NVF(0.7152f) * rgba.y + NVF(0.0722f) * rgba.z;
#endif
}
NVF getYLinear(NVF3 rgba)
{
    return NVF(0.2126f) * rgba.x + NVF(0.7152f) * rgba.y + NVF(0.0722f) * rgba.z;
}
#if NIS_SCALER
NVF4 GetEdgeMap(NVF p[4][4], NVI i, NVI j)
#else
NVF4 GetEdgeMap(NVF p[5][5], NVI i, NVI j)
#endif
{
    // directional gradients over a 3x3 neighborhood (0, 45, 90, 135 degrees)
    const NVF g_0 = abs(p[0 + i][0 + j] + p[0 + i][1 + j] + p[0 + i][2 + j] - p[2 + i][0 + j] - p[2 + i][1 + j] - p[2 + i][2 + j]);
    const NVF g_45 = abs(p[1 + i][0 + j] + p[0 + i][0 + j] + p[0 + i][1 + j] - p[2 + i][1 + j] - p[2 + i][2 + j] - p[1 + i][2 + j]);
    const NVF g_90 = abs(p[0 + i][0 + j] + p[1 + i][0 + j] + p[2 + i][0 + j] - p[0 + i][2 + j] - p[1 + i][2 + j] - p[2 + i][2 + j]);
    const NVF g_135 = abs(p[1 + i][0 + j] + p[2 + i][0 + j] + p[2 + i][1 + j] - p[0 + i][1 + j] - p[0 + i][2 + j] - p[1 + i][2 + j]);
    const NVF g_0_90_max = max(g_0, g_90);
    const NVF g_0_90_min = min(g_0, g_90);
    const NVF g_45_135_max = max(g_45, g_135);
    const NVF g_45_135_min = min(g_45, g_135);
    NVF e_0_90 = 0;
    NVF e_45_135 = 0;
    if ((g_0_90_max + g_45_135_max) != 0)
    {
        e_0_90 = g_0_90_max / (g_0_90_max + g_45_135_max);
        e_0_90 = min(e_0_90, 1.0f);
        e_45_135 = 1.0f - e_0_90;
    }
    // an edge direction is accepted only if its gradient clearly dominates the
    // orthogonal one and exceeds the detection threshold
    NVF e = ((g_0_90_max > (g_0_90_min * kDetectRatio)) && (g_0_90_max > kDetectThres) && (g_0_90_max > g_45_135_min)) ? 1.f : 0.f;
    NVF edge_0 = (g_0_90_max == g_0) ? e : 0.f;
    NVF edge_90 = (g_0_90_max == g_0) ? 0.f : e;
    e = ((g_45_135_max > (g_45_135_min * kDetectRatio)) && (g_45_135_max > kDetectThres) && (g_45_135_max > g_0_90_min)) ? 1.f : 0.f;
    NVF edge_45 = (g_45_135_max == g_45) ? e : 0.f;
    NVF edge_135 = (g_45_135_max == g_45) ? 0.f : e;
    NVF weight_0 = 0.f;
    NVF weight_90 = 0.f;
    NVF weight_45 = 0.f;
    NVF weight_135 = 0.f;
    if ((edge_0 + edge_90 + edge_45 + edge_135) >= 2.0f)
    {
        weight_0 = (edge_0 == 1.0f) ? e_0_90 : 0.f;
        weight_90 = (edge_0 == 1.0f) ? 0.f : e_0_90;
        weight_45 = (edge_45 == 1.0f) ? e_45_135 : 0.f;
        weight_135 = (edge_45 == 1.0f) ? 0.f : e_45_135;
    }
    else if ((edge_0 + edge_90 + edge_45 + edge_135) >= 1.0f)
    {
        weight_0 = edge_0;
        weight_90 = edge_90;
        weight_45 = edge_45;
        weight_135 = edge_135;
    }
    return NVF4(weight_0, weight_90, weight_45, weight_135);
}
#if NIS_SCALER
#ifndef NIS_BLOCK_WIDTH
#define NIS_BLOCK_WIDTH 32
#endif
#ifndef NIS_BLOCK_HEIGHT
#define NIS_BLOCK_HEIGHT 24
#endif
#ifndef NIS_THREAD_GROUP_SIZE
#define NIS_THREAD_GROUP_SIZE 256
#endif
#define kPhaseCount 64
#define kFilterSize 6
#define kSupportSize 6
#define kPadSize kSupportSize
// 'Tile' is the region of source luminance values that we load into shPixelsY.
// It is the area of source pixels covered by the destination 'Block' plus a
// 3 pixel border of support pixels.
#define kTilePitch (NIS_BLOCK_WIDTH + kPadSize)
#define kTileSize (kTilePitch * (NIS_BLOCK_HEIGHT + kPadSize))
// 'EdgeMap' is the region of source pixels for which edge map vectors are derived.
// It is the area of source pixels covered by the destination 'Block' plus a
// 1 pixel border.
#define kEdgeMapPitch (NIS_BLOCK_WIDTH + 2)
#define kEdgeMapSize (kEdgeMapPitch * (NIS_BLOCK_HEIGHT + 2))
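// With the default 32x24 block these work out to kTilePitch = 38,
// kTileSize = 38 * 30 = 1140, kEdgeMapPitch = 34, and kEdgeMapSize = 34 * 26 = 884.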
NVSHARED NVF shPixelsY[kTileSize];
NVSHARED NVH shCoefScaler[kPhaseCount][kFilterSize];
NVSHARED NVH shCoefUSM[kPhaseCount][kFilterSize];
NVSHARED NVH4 shEdgeMap[kEdgeMapSize];
void LoadFilterBanksSh(NVI i0, NVI di)
{
    // Load the filter banks into shared memory.
    // The work is spread over (kPhaseCount * 2) threads. Each phase is stored in the
    // coefficient textures as two float4 texels: taps 0-3 come from texel 0, taps 4-5
    // from texel 1 (its z and w components are unused).
    for (NVI i = i0; i < kPhaseCount * 2; i += di)
    {
        NVI phase = i / 2;
        NVI vIdx = i & 1;
        NVH4 v = NVH4(NVTEX_LOAD(coef_scaler, NVI2(vIdx, phase)));
        NVI filterOffset = vIdx * 4;
        shCoefScaler[phase][filterOffset + 0] = v.x;
        shCoefScaler[phase][filterOffset + 1] = v.y;
        if (vIdx == 0)
        {
            shCoefScaler[phase][2] = v.z;
            shCoefScaler[phase][3] = v.w;
        }
        v = NVH4(NVTEX_LOAD(coef_usm, NVI2(vIdx, phase)));
        shCoefUSM[phase][filterOffset + 0] = v.x;
        shCoefUSM[phase][filterOffset + 1] = v.y;
        if (vIdx == 0)
        {
            shCoefUSM[phase][2] = v.z;
            shCoefUSM[phase][3] = v.w;
        }
    }
}
NVF CalcLTI(NVF p0, NVF p1, NVF p2, NVF p3, NVF p4, NVF p5, NVI phase_index)
{
    // local contrast test: when one half of the 6-tap support has much higher
    // contrast than the other, the returned factor approaches 0, attenuating
    // USM near strong transitions to reduce ringing
    const bool selector = (phase_index <= kPhaseCount / 2);
    NVF sel = selector ? p0 : p3;
    const NVF a_min = min(min(p1, p2), sel);
    const NVF a_max = max(max(p1, p2), sel);
    sel = selector ? p2 : p5;
    const NVF b_min = min(min(p3, p4), sel);
    const NVF b_max = max(max(p3, p4), sel);
    const NVF a_cont = a_max - a_min;
    const NVF b_cont = b_max - b_min;
    const NVF cont_ratio = max(a_cont, b_cont) / (min(a_cont, b_cont) + kEps);
    return (1.0f - saturate((cont_ratio - kMinContrastRatio) * kRatioNorm)) * kContrastBoost;
}
NVF4 GetInterpEdgeMap(const NVF4 edge[2][2], NVF phase_frac_x, NVF phase_frac_y)
{
    // bilinear interpolation of the four neighboring edge-map vectors
    NVF4 h0 = lerp(edge[0][0], edge[0][1], phase_frac_x);
    NVF4 h1 = lerp(edge[1][0], edge[1][1], phase_frac_x);
    return lerp(h0, h1, phase_frac_y);
}
NVF EvalPoly6(const NVF pxl[6], NVI phase_int)
{
    NVF y = 0.f;
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            y += shCoefScaler[phase_int][i] * pxl[i];
        }
    }
    NVF y_usm = 0.f;
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            y_usm += shCoefUSM[phase_int][i] * pxl[i];
        }
    }
    // let's compute a piecewise ramp based on luma
    const NVF y_scale = 1.0f - saturate((y * (1.0f / NIS_SCALE_FLOAT) - kSharpStartY) * kSharpScaleY);
    // scale the ramp to sharpen as a function of luma
    const NVF y_sharpness = y_scale * kSharpStrengthScale + kSharpStrengthMin;
    y_usm *= y_sharpness;
    // scale the ramp to limit USM as a function of luma
    const NVF y_sharpness_limit = (y_scale * kSharpLimitScale + kSharpLimitMin) * y;
    y_usm = min(y_sharpness_limit, max(-y_sharpness_limit, y_usm));
    // reduce ringing
    y_usm *= CalcLTI(pxl[0], pxl[1], pxl[2], pxl[3], pxl[4], pxl[5], phase_int);
    return y + y_usm;
}
NVF FilterNormal(const NVF p[6][6], NVI phase_x_frac_int, NVI phase_y_frac_int)
{
    // separable 6x6 filter: vertical pass with the y-phase coefficients,
    // horizontal pass with the x-phase coefficients
    NVF h_acc = 0.0f;
    NIS_UNROLL
    for (NVI j = 0; j < 6; ++j)
    {
        NVF v_acc = 0.0f;
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            v_acc += p[i][j] * shCoefScaler[phase_y_frac_int][i];
        }
        h_acc += v_acc * shCoefScaler[phase_x_frac_int][j];
    }
    // let's return the sum unpacked -> we can accumulate it later
    return h_acc;
}
NVF4 GetDirFilters(NVF p[6][6], NVF phase_x_frac, NVF phase_y_frac, NVI phase_x_frac_int, NVI phase_y_frac_int)
{
    NVF4 f;
    // 0 deg filter
    NVF interp0Deg[6];
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            interp0Deg[i] = lerp(p[i][2], p[i][3], phase_x_frac);
        }
    }
    f.x = EvalPoly6(interp0Deg, phase_y_frac_int);
    // 90 deg filter
    NVF interp90Deg[6];
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            interp90Deg[i] = lerp(p[2][i], p[3][i], phase_y_frac);
        }
    }
    f.y = EvalPoly6(interp90Deg, phase_x_frac_int);
    // 45 deg filter
    NVF pphase_b45;
    pphase_b45 = 0.5f + 0.5f * (phase_x_frac - phase_y_frac);
    NVF temp_interp45Deg[7];
    temp_interp45Deg[1] = lerp(p[2][1], p[1][2], pphase_b45);
    temp_interp45Deg[3] = lerp(p[3][2], p[2][3], pphase_b45);
    temp_interp45Deg[5] = lerp(p[4][3], p[3][4], pphase_b45);
    {
        pphase_b45 = pphase_b45 - 0.5f;
        NVF a = (pphase_b45 >= 0.f) ? p[0][2] : p[2][0];
        NVF b = (pphase_b45 >= 0.f) ? p[1][3] : p[3][1];
        NVF c = (pphase_b45 >= 0.f) ? p[2][4] : p[4][2];
        NVF d = (pphase_b45 >= 0.f) ? p[3][5] : p[5][3];
        temp_interp45Deg[0] = lerp(p[1][1], a, abs(pphase_b45));
        temp_interp45Deg[2] = lerp(p[2][2], b, abs(pphase_b45));
        temp_interp45Deg[4] = lerp(p[3][3], c, abs(pphase_b45));
        temp_interp45Deg[6] = lerp(p[4][4], d, abs(pphase_b45));
    }
    NVF interp45Deg[6];
    NVF pphase_p45 = phase_x_frac + phase_y_frac;
    if (pphase_p45 >= 1)
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; i++)
        {
            interp45Deg[i] = temp_interp45Deg[i + 1];
        }
        pphase_p45 = pphase_p45 - 1;
    }
    else
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; i++)
        {
            interp45Deg[i] = temp_interp45Deg[i];
        }
    }
    f.z = EvalPoly6(interp45Deg, NVI(pphase_p45 * kPhaseCount));
    // 135 deg filter
    NVF pphase_b135;
    pphase_b135 = 0.5f * (phase_x_frac + phase_y_frac);
    NVF temp_interp135Deg[7];
    temp_interp135Deg[1] = lerp(p[3][1], p[4][2], pphase_b135);
    temp_interp135Deg[3] = lerp(p[2][2], p[3][3], pphase_b135);
    temp_interp135Deg[5] = lerp(p[1][3], p[2][4], pphase_b135);
    {
        pphase_b135 = pphase_b135 - 0.5f;
        NVF a = (pphase_b135 >= 0.f) ? p[5][2] : p[3][0];
        NVF b = (pphase_b135 >= 0.f) ? p[4][3] : p[2][1];
        NVF c = (pphase_b135 >= 0.f) ? p[3][4] : p[1][2];
        NVF d = (pphase_b135 >= 0.f) ? p[2][5] : p[0][3];
        temp_interp135Deg[0] = lerp(p[4][1], a, abs(pphase_b135));
        temp_interp135Deg[2] = lerp(p[3][2], b, abs(pphase_b135));
        temp_interp135Deg[4] = lerp(p[2][3], c, abs(pphase_b135));
        temp_interp135Deg[6] = lerp(p[1][4], d, abs(pphase_b135));
    }
    NVF interp135Deg[6];
    NVF pphase_p135 = 1 + (phase_x_frac - phase_y_frac);
    if (pphase_p135 >= 1)
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            interp135Deg[i] = temp_interp135Deg[i + 1];
        }
        pphase_p135 = pphase_p135 - 1;
    }
    else
    {
        NIS_UNROLL
        for (NVI i = 0; i < 6; ++i)
        {
            interp135Deg[i] = temp_interp135Deg[i];
        }
    }
    f.w = EvalPoly6(interp135Deg, NVI(pphase_p135 * kPhaseCount));
    return f;
}
//-----------------------------------------------------------------------------------------------
// NVScaler
//-----------------------------------------------------------------------------------------------
void NVScaler(NVU2 blockIdx, NVU threadIdx)
{
    // Figure out the range of pixels from the input image that need to be loaded for this thread-block
    NVI dstBlockX = NVI(NIS_BLOCK_WIDTH * blockIdx.x);
    NVI dstBlockY = NVI(NIS_BLOCK_HEIGHT * blockIdx.y);
    const NVI srcBlockStartX = NVI(floor((dstBlockX + 0.5f) * kScaleX - 0.5f));
    const NVI srcBlockStartY = NVI(floor((dstBlockY + 0.5f) * kScaleY - 0.5f));
    const NVI srcBlockEndX = NVI(ceil((dstBlockX + NIS_BLOCK_WIDTH + 0.5f) * kScaleX - 0.5f));
    const NVI srcBlockEndY = NVI(ceil((dstBlockY + NIS_BLOCK_HEIGHT + 0.5f) * kScaleY - 0.5f));
    NVI numTilePixelsX = srcBlockEndX - srcBlockStartX + kSupportSize - 1;
    NVI numTilePixelsY = srcBlockEndY - srcBlockStartY + kSupportSize - 1;
    // round up the load region to an even size, since we're loading in 2x2 batches
    numTilePixelsX += numTilePixelsX & 0x1;
    numTilePixelsY += numTilePixelsY & 0x1;
    const NVI numTilePixels = numTilePixelsX * numTilePixelsY;
    // calculate the equivalent values for the edge map
    const NVI numEdgeMapPixelsX = numTilePixelsX - kSupportSize + 2;
    const NVI numEdgeMapPixelsY = numTilePixelsY - kSupportSize + 2;
    const NVI numEdgeMapPixels = numEdgeMapPixelsX * numEdgeMapPixelsY;
    // fill in the input luma tile (shPixelsY) in batches of 2x2 pixels;
    // we use texture gather to get the extra support necessary
    // to compute 2x2 edge map outputs too
    {
        for (NVI i = NVI(threadIdx) * 2; i < numTilePixels / 2; i += NIS_THREAD_GROUP_SIZE * 2)
        {
            NVI py = (i / numTilePixelsX) * 2;
            NVI px = i % numTilePixelsX;
            // 0.5 to be in the center of the texel
            // -(kSupportSize - 1) / 2 to shift by the kernel support size
            NVF kShift = 0.5f - (kSupportSize - 1) / 2;
#if NIS_VIEWPORT_SUPPORT
            const NVF tx = (srcBlockStartX + px + kInputViewportOriginX + kShift) * kSrcNormX;
            const NVF ty = (srcBlockStartY + py + kInputViewportOriginY + kShift) * kSrcNormY;
#else
            const NVF tx = (srcBlockStartX + px + kShift) * kSrcNormX;
            const NVF ty = (srcBlockStartY + py + kShift) * kSrcNormY;
#endif
            NVF p[2][2];
#if NIS_TEXTURE_GATHER
            {
                const NVF4 sr = NVTEX_SAMPLE_RED(in_texture, samplerLinearClamp, NVF2(tx, ty));
                const NVF4 sg = NVTEX_SAMPLE_GREEN(in_texture, samplerLinearClamp, NVF2(tx, ty));
                const NVF4 sb = NVTEX_SAMPLE_BLUE(in_texture, samplerLinearClamp, NVF2(tx, ty));
                p[0][0] = getY(NVF3(sr.w, sg.w, sb.w));
                p[0][1] = getY(NVF3(sr.z, sg.z, sb.z));
                p[1][0] = getY(NVF3(sr.x, sg.x, sb.x));
                p[1][1] = getY(NVF3(sr.y, sg.y, sb.y));
            }
#else
            NIS_UNROLL
            for (NVI j = 0; j < 2; j++)
            {
                NIS_UNROLL
                for (NVI k = 0; k < 2; k++)
                {
                    const NVF4 s = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2(tx + k * kSrcNormX, ty + j * kSrcNormY));
                    p[j][k] = getY(s.xyz);
                }
            }
#endif
            const NVI idx = py * kTilePitch + px;
            shPixelsY[idx] = NVH(p[0][0]);
            shPixelsY[idx + 1] = NVH(p[0][1]);
            shPixelsY[idx + kTilePitch] = NVH(p[1][0]);
            shPixelsY[idx + kTilePitch + 1] = NVH(p[1][1]);
        }
    }
    GroupMemoryBarrierWithGroupSync();
    {
        // fill in the edge map in batches of 2x2 pixels
        for (NVI i = NVI(threadIdx) * 2; i < numEdgeMapPixels / 2; i += NIS_THREAD_GROUP_SIZE * 2)
        {
            NVI py = (i / numEdgeMapPixelsX) * 2;
            NVI px = i % numEdgeMapPixelsX;
            const NVI edgeMapIdx = py * kEdgeMapPitch + px;
            NVI tileCornerIdx = (py + 1) * kTilePitch + px + 1;
            NVF p[4][4];
            NIS_UNROLL
            for (NVI j = 0; j < 4; j++)
            {
                NIS_UNROLL
                for (NVI k = 0; k < 4; k++)
                {
                    p[j][k] = shPixelsY[tileCornerIdx + j * kTilePitch + k];
                }
            }
            shEdgeMap[edgeMapIdx] = NVH4(GetEdgeMap(p, 0, 0));
            shEdgeMap[edgeMapIdx + 1] = NVH4(GetEdgeMap(p, 0, 1));
            shEdgeMap[edgeMapIdx + kEdgeMapPitch] = NVH4(GetEdgeMap(p, 1, 0));
            shEdgeMap[edgeMapIdx + kEdgeMapPitch + 1] = NVH4(GetEdgeMap(p, 1, 1));
        }
    }
    LoadFilterBanksSh(NVI(threadIdx), NIS_THREAD_GROUP_SIZE);
    GroupMemoryBarrierWithGroupSync();
    for (NVI k = NVI(threadIdx); k < NIS_BLOCK_WIDTH * NIS_BLOCK_HEIGHT; k += NIS_THREAD_GROUP_SIZE)
    {
        const NVI2 pos = NVI2(k % NIS_BLOCK_WIDTH, k / NIS_BLOCK_WIDTH);
        const NVI dstX = dstBlockX + pos.x;
        const NVI dstY = dstBlockY + pos.y;
        const NVF srcX = (0.5f + dstX) * kScaleX - 0.5f;
        const NVF srcY = (0.5f + dstY) * kScaleY - 0.5f;
#if NIS_VIEWPORT_SUPPORT
        if (srcX > kInputViewportWidth || srcY > kInputViewportHeight ||
            dstX > kOutputViewportWidth || dstY > kOutputViewportHeight)
        {
            return;
        }
#endif
        const NVI px = NVI(floor(srcX) - srcBlockStartX);
        const NVI py = NVI(floor(srcY) - srcBlockStartY);
        const NVI startTileIdx = py * kTilePitch + px;
        // load the 6x6 support into registers
        NVF p[6][6];
        {
            NIS_UNROLL
            for (NVI i = 0; i < 6; ++i)
            {
                NIS_UNROLL
                for (NVI j = 0; j < 6; ++j)
                {
                    p[i][j] = shPixelsY[startTileIdx + i * kTilePitch + j];
                }
            }
        }
        // compute the discretized filter phase
        const NVF fx = srcX - floor(srcX);
        const NVF fy = srcY - floor(srcY);
        const NVI fx_int = NVI(fx * kPhaseCount);
        const NVI fy_int = NVI(fy * kPhaseCount);
        // get the traditional scaler filter output
        const NVF pixel_n = FilterNormal(p, fx_int, fy_int);
        // get the directional filter bank output
        NVF4 opDirYU = GetDirFilters(p, fx, fy, fx_int, fy_int);
        // final luma is a weighted blend of the directional & normal filters;
        // generate weights for the directional filters
        const NVI startEdgeMapIdx = py * kEdgeMapPitch + px;
        NVF4 edge[2][2];
        NIS_UNROLL
        for (NVI i = 0; i < 2; i++)
        {
            NIS_UNROLL
            for (NVI j = 0; j < 2; j++)
            {
                // need to shift edge map sampling since it's a 2x2 centered inside the 6x6 grid
                edge[i][j] = shEdgeMap[startEdgeMapIdx + (i * kEdgeMapPitch) + j];
            }
        }
        const NVF4 w = GetInterpEdgeMap(edge, fx, fy) * NIS_SCALE_INT;
        // the final pixel is a weighted sum of the filter outputs
        const NVF opY = (opDirYU.x * w.x + opDirYU.y * w.y + opDirYU.z * w.z + opDirYU.w * w.w +
                         pixel_n * (NIS_SCALE_FLOAT - w.x - w.y - w.z - w.w)) * (1.0f / NIS_SCALE_FLOAT);
        // do a bilinear tap for chroma upscaling
#if NIS_VIEWPORT_SUPPORT
        NVF4 op = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2((srcX + kInputViewportOriginX) * kSrcNormX, (srcY + kInputViewportOriginY) * kSrcNormY));
#else
        NVF4 op = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2((dstX + 0.5f) * kDstNormX, (dstY + 0.5f) * kDstNormY));
#endif
#if NIS_HDR_MODE == NIS_HDR_MODE_LINEAR
        // rescale RGB so the bilinear tap takes on the new upscaled/sharpened luma
        const NVF kEps = 1e-4f;
        const NVF kNorm = 1.0f / (NIS_SCALE_FLOAT * kHDRCompressionFactor);
        const NVF opYN = max(opY, 0.0f) * kNorm;
        const NVF corr = (opYN * opYN + kEps) / (max(getYLinear(NVF3(op.x, op.y, op.z)), 0.0f) + kEps);
        op.x *= corr;
        op.y *= corr;
        op.z *= corr;
#else
        // add the luma difference so the output pixel takes on the new luma
        const NVF corr = opY * (1.0f / NIS_SCALE_FLOAT) - getY(NVF3(op.x, op.y, op.z));
        op.x += corr;
        op.y += corr;
        op.z += corr;
#endif
#if NIS_VIEWPORT_SUPPORT
        NVTEX_STORE(out_texture, NVU2(dstX + kOutputViewportOriginX, dstY + kOutputViewportOriginY), op);
#else
        NVTEX_STORE(out_texture, NVU2(dstX, dstY), op);
#endif
    }
}
#else
#ifndef NIS_BLOCK_WIDTH
#define NIS_BLOCK_WIDTH 32
#endif
#ifndef NIS_BLOCK_HEIGHT
#define NIS_BLOCK_HEIGHT 32
#endif
#ifndef NIS_THREAD_GROUP_SIZE
#define NIS_THREAD_GROUP_SIZE 256
#endif
#define kSupportSize 5
#define kNumPixelsX (NIS_BLOCK_WIDTH + kSupportSize + 1)
#define kNumPixelsY (NIS_BLOCK_HEIGHT + kSupportSize + 1)
NVSHARED NVF shPixelsY[kNumPixelsY][kNumPixelsX];
NVF CalcLTIFast(const NVF y[5])
{
    // local contrast test over the 5-tap support: the returned factor approaches 0
    // when one half has much higher contrast than the other, attenuating USM there
    const NVF a_min = min(min(y[0], y[1]), y[2]);
    const NVF a_max = max(max(y[0], y[1]), y[2]);
    const NVF b_min = min(min(y[2], y[3]), y[4]);
    const NVF b_max = max(max(y[2], y[3]), y[4]);
    const NVF a_cont = a_max - a_min;
    const NVF b_cont = b_max - b_min;
    const NVF cont_ratio = max(a_cont, b_cont) / (min(a_cont, b_cont) + kEps * (1.0f / NIS_SCALE_FLOAT));
    return (1.0f - saturate((cont_ratio - kMinContrastRatio) * kRatioNorm)) * kContrastBoost;
}
NVF EvalUSM(const NVF pxl[5], const NVF sharpnessStrength, const NVF sharpnessLimit)
{
    // USM profile: a 3-tap high-pass kernel (coefficients sum to zero)
    NVF y_usm = -0.6001f * pxl[1] + 1.2002f * pxl[2] - 0.6001f * pxl[3];
    // boost the USM profile
    y_usm *= sharpnessStrength;
    // clamp to the limit
    y_usm = min(sharpnessLimit, max(-sharpnessLimit, y_usm));
    // reduce ringing
    y_usm *= CalcLTIFast(pxl);
    return y_usm;
}
NVF4 GetDirUSM(const NVF p[5][5])
{
    // sharpness boost & limit are the same for all directions
    const NVF scaleY = 1.0f - saturate((p[2][2] - kSharpStartY) * kSharpScaleY);
    // scale the ramp to sharpen as a function of luma
    const NVF sharpnessStrength = scaleY * kSharpStrengthScale + kSharpStrengthMin;
    // scale the ramp to limit USM as a function of luma
    const NVF sharpnessLimit = (scaleY * kSharpLimitScale + kSharpLimitMin) * p[2][2];
    NVF4 rval;
    // 0 deg filter
    NVF interp0Deg[5];
    {
        for (NVI i = 0; i < 5; ++i)
        {
            interp0Deg[i] = p[i][2];
        }
    }
    rval.x = EvalUSM(interp0Deg, sharpnessStrength, sharpnessLimit);
    // 90 deg filter
    NVF interp90Deg[5];
    {
        for (NVI i = 0; i < 5; ++i)
        {
            interp90Deg[i] = p[2][i];
        }
    }
    rval.y = EvalUSM(interp90Deg, sharpnessStrength, sharpnessLimit);
    // 45 deg filter
    NVF interp45Deg[5];
    interp45Deg[0] = p[1][1];
    interp45Deg[1] = lerp(p[2][1], p[1][2], 0.5f);
    interp45Deg[2] = p[2][2];
    interp45Deg[3] = lerp(p[3][2], p[2][3], 0.5f);
    interp45Deg[4] = p[3][3];
    rval.z = EvalUSM(interp45Deg, sharpnessStrength, sharpnessLimit);
    // 135 deg filter
    NVF interp135Deg[5];
    interp135Deg[0] = p[3][1];
    interp135Deg[1] = lerp(p[3][2], p[2][1], 0.5f);
    interp135Deg[2] = p[2][2];
    interp135Deg[3] = lerp(p[2][3], p[1][2], 0.5f);
    interp135Deg[4] = p[1][3];
    rval.w = EvalUSM(interp135Deg, sharpnessStrength, sharpnessLimit);
    return rval;
}
//-----------------------------------------------------------------------------------------------
// NVSharpen
//-----------------------------------------------------------------------------------------------
void NVSharpen(NVU2 blockIdx, NVU threadIdx)
{
    const NVI dstBlockX = NVI(NIS_BLOCK_WIDTH * blockIdx.x);
    const NVI dstBlockY = NVI(NIS_BLOCK_HEIGHT * blockIdx.y);
    // fill in the input luma tile in batches of 2x2 pixels
    const NVF kShift = 0.5f - kSupportSize / 2;
    for (NVI i = NVI(threadIdx) * 2; i < kNumPixelsX * kNumPixelsY / 2; i += NIS_THREAD_GROUP_SIZE * 2)
    {
        NVU2 pos = NVU2(i % kNumPixelsX, i / kNumPixelsX * 2);
        NIS_UNROLL
        for (NVI dy = 0; dy < 2; dy++)
        {
            NIS_UNROLL
            for (NVI dx = 0; dx < 2; dx++)
            {
#if NIS_VIEWPORT_SUPPORT
                const NVF tx = (dstBlockX + pos.x + kInputViewportOriginX + dx + kShift) * kSrcNormX;
                const NVF ty = (dstBlockY + pos.y + kInputViewportOriginY + dy + kShift) * kSrcNormY;
#else
                const NVF tx = (dstBlockX + pos.x + dx + kShift) * kSrcNormX;
                const NVF ty = (dstBlockY + pos.y + dy + kShift) * kSrcNormY;
#endif
                const NVF4 px = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2(tx, ty));
                shPixelsY[pos.y + dy][pos.x + dx] = getY(px.xyz);
            }
        }
    }
    GroupMemoryBarrierWithGroupSync();
    for (NVI k = NVI(threadIdx); k < NIS_BLOCK_WIDTH * NIS_BLOCK_HEIGHT; k += NIS_THREAD_GROUP_SIZE)
    {
        const NVI2 pos = NVI2(k % NIS_BLOCK_WIDTH, k / NIS_BLOCK_WIDTH);
        // load the 5x5 support into registers
        NVF p[5][5];
        NIS_UNROLL
        for (NVI i = 0; i < 5; ++i)
        {
            NIS_UNROLL
            for (NVI j = 0; j < 5; ++j)
            {
                p[i][j] = shPixelsY[pos.y + i][pos.x + j];
            }
        }
        // get the directional filter bank output
        NVF4 dirUSM = GetDirUSM(p);
        // generate weights for the directional filters
        NVF4 w = GetEdgeMap(p, kSupportSize / 2 - 1, kSupportSize / 2 - 1);
        // the final USM is a weighted sum of the filter outputs
        const NVF usmY = (dirUSM.x * w.x + dirUSM.y * w.y + dirUSM.z * w.z + dirUSM.w * w.w);
        // do a bilinear tap and correct the rgb texel so it produces the new sharpened luma
        const NVI dstX = dstBlockX + pos.x;
        const NVI dstY = dstBlockY + pos.y;
#if NIS_VIEWPORT_SUPPORT
        if (dstX > kOutputViewportWidth || dstY > kOutputViewportHeight)
        {
            return;
        }
#endif
#if NIS_VIEWPORT_SUPPORT
        NVF4 op = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2((dstX + kInputViewportOriginX) * kSrcNormX, (dstY + kInputViewportOriginY) * kSrcNormY));
#else
        NVF4 op = NVTEX_SAMPLE(in_texture, samplerLinearClamp, NVF2((dstX + 0.5f) * kDstNormX, (dstY + 0.5f) * kDstNormY));
#endif
#if NIS_HDR_MODE == NIS_HDR_MODE_LINEAR
        const NVF kEps = 1e-4f * kHDRCompressionFactor * kHDRCompressionFactor;
        NVF newY = p[2][2] + usmY;
        newY = max(newY, 0.0f);
        const NVF oldY = p[2][2];
        const NVF corr = (newY * newY + kEps) / (oldY * oldY + kEps);
        op.x *= corr;
        op.y *= corr;
        op.z *= corr;
#else
        op.x += usmY;
        op.y += usmY;
        op.z += usmY;
#endif
#if NIS_VIEWPORT_SUPPORT
        NVTEX_STORE(out_texture, NVU2(dstX + kOutputViewportOriginX, dstY + kOutputViewportOriginY), op);
#else
        NVTEX_STORE(out_texture, NVU2(dstX, dstY), op);
#endif
    }
}
#endif