#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np

import lc3
import tables as T, appendix_c as C

### ------------------------------------------------------------------------ ###

class Tns:

    SUB_LIM_10M_NB   = [ [ 12,  34,  57,  80 ] ]
    SUB_LIM_10M_WB   = [ [ 12,  61, 110, 160 ] ]
    SUB_LIM_10M_SSWB = [ [ 12,  88, 164, 240 ] ]
    SUB_LIM_10M_SWB  = [ [ 12,  61, 110, 160 ], [ 160, 213, 266, 320 ] ]
    SUB_LIM_10M_FB   = [ [ 12,  74, 137, 200 ], [ 200, 266, 333, 400 ] ]

    SUB_LIM_10M = [ SUB_LIM_10M_NB, SUB_LIM_10M_WB,
                    SUB_LIM_10M_SSWB, SUB_LIM_10M_SWB, SUB_LIM_10M_FB ]

    SUB_LIM_7M5_NB   = [ [ 9,  26,  43,  60 ] ]
    SUB_LIM_7M5_WB   = [ [ 9,  46,  83, 120 ] ]
    SUB_LIM_7M5_SSWB = [ [ 9,  66, 123, 180 ] ]
    SUB_LIM_7M5_SWB  = [ [ 9,  46,  82, 120 ], [ 120, 159, 200, 240 ] ]
    SUB_LIM_7M5_FB   = [ [ 9,  56, 103, 150 ], [ 150, 200, 250, 300 ] ]

    SUB_LIM_7M5 = [ SUB_LIM_7M5_NB, SUB_LIM_7M5_WB,
                    SUB_LIM_7M5_SSWB, SUB_LIM_7M5_SWB, SUB_LIM_7M5_FB ]

    SUB_LIM = [ SUB_LIM_7M5, SUB_LIM_10M ]

    FREQ_LIM_10M_NB   = [ 12,  80 ]
    FREQ_LIM_10M_WB   = [ 12, 160 ]
    FREQ_LIM_10M_SSWB = [ 12, 240 ]
    FREQ_LIM_10M_SWB  = [ 12, 160, 320 ]
    FREQ_LIM_10M_FB   = [ 12, 200, 400 ]

    FREQ_LIM_10M = [ FREQ_LIM_10M_NB, FREQ_LIM_10M_WB,
                     FREQ_LIM_10M_SSWB, FREQ_LIM_10M_SWB, FREQ_LIM_10M_FB ]

    FREQ_LIM_7M5_NB   = [ 9,  60 ]
    FREQ_LIM_7M5_WB   = [ 9, 120 ]
    FREQ_LIM_7M5_SSWB = [ 9, 180 ]
    FREQ_LIM_7M5_SWB  = [ 9, 120, 240 ]
    FREQ_LIM_7M5_FB   = [ 9, 150, 300 ]

    FREQ_LIM_7M5 = [ FREQ_LIM_7M5_NB, FREQ_LIM_7M5_WB,
                     FREQ_LIM_7M5_SSWB, FREQ_LIM_7M5_SWB, FREQ_LIM_7M5_FB ]

    FREQ_LIM = [ FREQ_LIM_7M5, FREQ_LIM_10M ]

    def __init__(self, dt):

        self.dt = dt

        (self.nfilters, self.lpc_weighting, self.rc_order, self.rc) = \
            (0, False, np.array([ 0, 0 ]), np.array([ 0, 0 ]))

    def get_data(self):

        return { 'nfilters' : self.nfilters,
                 'lpc_weighting' : self.lpc_weighting,
                 'rc_order' : self.rc_order, 'rc' : self.rc - 8 }

    def get_nbits(self):

        lpc_weighting = self.lpc_weighting
        nbits = 0

        for f in range(self.nfilters):
            rc_order = self.rc_order[f]
            rc = self.rc[f]

            nbits_order = T.TNS_ORDER_BITS[int(lpc_weighting)][rc_order]
            nbits_coef = sum([ T.TNS_COEF_BITS[k][rc[k]]
                               for k in range(rc_order) ])

            nbits += ((2048 + nbits_order + nbits_coef) + 2047) >> 11

        return nbits


class TnsAnalysis(Tns):

    def __init__(self, dt):

        super().__init__(dt)

    def compute_lpc_coeffs(self, bw, f, x):

        ### Normalized autocorrelation function

        S = Tns.SUB_LIM[self.dt][bw][f]

        r = np.append([ 3 ], np.zeros(8))
        e = [ sum(x[S[s]:S[s+1]] ** 2) for s in range(3) ]

        for k in range(len(r) if sum(e) > 0 else 0):
            c = [ np.dot(x[S[s]:S[s+1]-k], x[S[s]+k:S[s+1]])
                  for s in range(3) ]

            r[k] = np.sum( np.array(c) / np.array(e) )

        r *= np.exp(-0.5 * (0.02 * np.pi * np.arange(9)) ** 2)

        ### Levinson-Durbin recursion
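        # The recursion converts the lag-windowed autocorrelation r into
        # LPC coefficients a of order 8; err tracks the residual energy,
        # and the returned ratio r[0] / err is the prediction gain that
        # run() compares against the 1.5 threshold to enable the filter.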

        err = r[0]
        a = np.ones(len(r))

        for k in range(1, len(a)):

            rc = -sum(a[:k] * r[k:0:-1]) / err

            a[1:k] += rc * a[k-1:0:-1]
            a[k] = rc

            err *= 1 - rc ** 2

        return (r[0] / err, a)

    def lpc_weight(self, pred_gain, a):

        gamma = 1 - (1 - 0.85) * (2 - pred_gain) / (2 - 1.5)
        return a * np.power(gamma, np.arange(len(a)))

    def coeffs_reflexion(self, a):

        rc = np.zeros(8)
        b = a.copy()

        for k in range(8, 0, -1):
            rc[k-1] = b[k]
            e = 1 - rc[k-1] ** 2
            b[1:k] = (b[1:k] - rc[k-1] * b[k-1:0:-1]) / e

        return rc

    def quantization(self, rc, lpc_weighting):

        delta = np.pi / 17
        rc_i = np.rint(np.arcsin(rc) / delta).astype(int) + 8
        rc_q = np.sin(delta * (rc_i - 8))

        rc_order = len(rc_i) - np.argmin(rc_i[::-1] == 8)

        return (rc_order, rc_q, rc_i)

    def filtering(self, st, x, rc_order, rc):

        y = np.empty(len(x))

        for i in range(len(x)):

            xi = x[i]
            s1 = xi

            for k in range(rc_order):
                s0 = st[k]
                st[k] = s1

                s1 = rc[k] * xi + s0
                xi += rc[k] * s0

            y[i] = xi

        return y

    def run(self, x, bw, nn_flag, nbytes):

        fstate = np.zeros(8)
        y = x.copy()

        self.nfilters = len(Tns.SUB_LIM[self.dt][bw])
        self.lpc_weighting = nbytes * 8 < 48 * T.DT_MS[self.dt]
        self.rc_order = np.zeros(2, dtype=int)
        self.rc = np.zeros((2, 8), dtype=int)

        for f in range(self.nfilters):

            (pred_gain, a) = self.compute_lpc_coeffs(bw, f, x)

            tns_off = pred_gain <= 1.5 or nn_flag
            if tns_off:
                continue

            if self.lpc_weighting and pred_gain < 2:
                a = self.lpc_weight(pred_gain, a)

            rc = self.coeffs_reflexion(a)

            (rc_order, rc_q, rc_i) = \
                self.quantization(rc, self.lpc_weighting)

            self.rc_order[f] = rc_order
            self.rc[f] = rc_i

            if rc_order > 0:
                i0 = Tns.FREQ_LIM[self.dt][bw][f]
                i1 = Tns.FREQ_LIM[self.dt][bw][f+1]

                y[i0:i1] = self.filtering(
                    fstate, x[i0:i1], rc_order, rc_q)

        return y

    def store(self, b):

        for f in range(self.nfilters):
            lpc_weighting = self.lpc_weighting
            rc_order = self.rc_order[f]
            rc = self.rc[f]

            b.write_bit(min(rc_order, 1))

            if rc_order > 0:
                b.ac_encode(
                    T.TNS_ORDER_CUMFREQ[int(lpc_weighting)][rc_order-1],
                    T.TNS_ORDER_FREQ[int(lpc_weighting)][rc_order-1])

                for k in range(rc_order):
                    b.ac_encode(T.TNS_COEF_CUMFREQ[k][rc[k]],
                                T.TNS_COEF_FREQ[k][rc[k]])


class TnsSynthesis(Tns):
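
    # The synthesis filter is the inverse (all-pole) lattice of the analysis
    # filter above: it uses the same quantized reflection coefficients, with
    # a filter state carried across the successive filters of a frame.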

    def filtering(self, st, x, rc_order, rc):

        y = x.copy()

        for i in range(len(x)):

            xi = x[i] - rc[rc_order-1] * st[rc_order-1]
            for k in range(rc_order-2, -1, -1):
                xi -= rc[k] * st[k]
                st[k+1] = xi * rc[k] + st[k]
            st[0] = xi

            y[i] = xi

        return y

    def load(self, b, bw, nbytes):

        self.nfilters = len(Tns.SUB_LIM[self.dt][bw])
        self.lpc_weighting = nbytes * 8 < 48 * T.DT_MS[self.dt]
        self.rc_order = np.zeros(2, dtype=int)
        self.rc = 8 * np.ones((2, 8), dtype=int)

        for f in range(self.nfilters):

            if not b.read_bit():
                continue

            rc_order = 1 + b.ac_decode(
                T.TNS_ORDER_CUMFREQ[int(self.lpc_weighting)],
                T.TNS_ORDER_FREQ[int(self.lpc_weighting)])

            self.rc_order[f] = rc_order

            for k in range(rc_order):
                rc = b.ac_decode(T.TNS_COEF_CUMFREQ[k], T.TNS_COEF_FREQ[k])
                self.rc[f][k] = rc

    def run(self, x, bw):

        fstate = np.zeros(8)
        y = x.copy()

        for f in range(self.nfilters):

            rc_order = self.rc_order[f]
            rc = np.sin((np.pi / 17) * (self.rc[f] - 8))

            if rc_order > 0:
                i0 = Tns.FREQ_LIM[self.dt][bw][f]
                i1 = Tns.FREQ_LIM[self.dt][bw][f+1]

                y[i0:i1] = self.filtering(
                    fstate, x[i0:i1], rc_order, rc)

        return y


### ------------------------------------------------------------------------ ###

def check_analysis(rng, dt, bw):

    ok = True

    analysis = TnsAnalysis(dt)
    nbytes_lim = int((48 * T.DT_MS[dt]) // 8)

    for i in range(10):
        x = rng.random(T.NE[dt][bw]) * 1e2
        x = pow(x, .5 + i/5)

        for nn_flag in (True, False):
            for nbytes in (nbytes_lim, nbytes_lim + 1):

                y = analysis.run(x, bw, nn_flag, nbytes)
                (y_c, data_c) = lc3.tns_analyze(dt, bw, nn_flag, nbytes, x)

                ok = ok and data_c['nfilters'] == analysis.nfilters
                ok = ok and data_c['lpc_weighting'] == analysis.lpc_weighting
                for f in range(analysis.nfilters):
                    rc_order = analysis.rc_order[f]
                    rc_order_c = data_c['rc_order'][f]
                    rc_c = 8 + data_c['rc'][f]
                    ok = ok and rc_order_c == rc_order
                    ok = ok and not np.any((rc_c - analysis.rc[f])[:rc_order])

                ok = ok and lc3.tns_get_nbits(data_c) == analysis.get_nbits()
                ok = ok and np.amax(np.abs(y_c - y)) < 1e-2

    return ok

def check_synthesis(rng, dt, bw):

    ok = True
    synthesis = TnsSynthesis(dt)

    for i in range(100):

        x = rng.random(T.NE[dt][bw]) * 1e2

        synthesis.nfilters = 1 + int(bw >= T.SRATE_32K)
        synthesis.rc_order = rng.integers(0, 9, 2)
        synthesis.rc = rng.integers(0, 17, 16).reshape(2, 8)

        y = synthesis.run(x, bw)
        y_c = lc3.tns_synthesize(dt, bw, synthesis.get_data(), x)

        ok = ok and np.amax(np.abs(y_c - y)) < 1e-6

    return ok

def check_analysis_appendix_c(dt):

    sr = T.SRATE_16K
    ok = True

    fs = Tns.FREQ_LIM[dt][sr][0]
    fe = Tns.FREQ_LIM[dt][sr][1]
    st = np.zeros(8)

    for i in range(len(C.X_S[dt])):

        (_, a) = lc3.tns_compute_lpc_coeffs(dt, sr, C.X_S[dt][i])
        ok = ok and np.amax(np.abs(a[0] - C.TNS_LEV_A[dt][i])) < 1e-5

        rc = lc3.tns_lpc_reflection(a[0])
        ok = ok and np.amax(np.abs(rc - C.TNS_LEV_RC[dt][i])) < 1e-5

        (rc_order, rc_i) = lc3.tns_quantize_rc(C.TNS_LEV_RC[dt][i])
        ok = ok and rc_order == C.RC_ORDER[dt][i][0]
        ok = ok and not np.any((rc_i + 8) - C.RC_I_1[dt][i])

        rc_q = lc3.tns_unquantize_rc(rc_i, rc_order)
        ok = ok and np.amax(np.abs(rc_q - C.RC_Q_1[dt][i])) < 1e-6

        (x, side) = lc3.tns_analyze(dt, sr, False, C.NBYTES[dt], C.X_S[dt][i])
        ok = ok and side['nfilters'] == 1
        ok = ok and side['rc_order'][0] == C.RC_ORDER[dt][i][0]
        ok = ok and not np.any((side['rc'][0] + 8) - C.RC_I_1[dt][i])
        ok = ok and lc3.tns_get_nbits(side) == C.NBITS_TNS[dt][i]
        ok = ok and np.amax(np.abs(x - C.X_F[dt][i])) < 1e-3

    return ok

def check_synthesis_appendix_c(dt):

    sr = T.SRATE_16K
    ok = True

    for i in range(len(C.X_HAT_Q[dt])):

        side = {
            'nfilters' : 1,
            'lpc_weighting' : C.NBYTES[dt] * 8 < 48 * T.DT_MS[dt],
            'rc_order': C.RC_ORDER[dt][i],
            'rc': [ C.RC_I_1[dt][i] - 8, C.RC_I_2[dt][i] - 8 ]
        }

        g_int = C.GG_IND_ADJ[dt][i] + C.GG_OFF[dt][i]
        x = C.X_HAT_Q[dt][i] * (10 ** (g_int / 28))

        x = lc3.tns_synthesize(dt, sr, side, x)
        ok = ok and np.amax(np.abs(x - C.X_HAT_TNS[dt][i])) < 1e-3
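
    # The remaining Appendix C vectors are provided for the 10 ms frame only;
    # they exercise the two-filter configuration at 48 kHz.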
    if dt != T.DT_10M:
        return ok

    sr = T.SRATE_48K

    side = {
        'nfilters' : 2,
        'lpc_weighting' : False,
        'rc_order': C.RC_ORDER_48K_10M,
        'rc': [ C.RC_I_1_48K_10M - 8, C.RC_I_2_48K_10M - 8 ]
    }

    x = C.X_HAT_F_48K_10M
    x = lc3.tns_synthesize(dt, sr, side, x)
    ok = ok and np.amax(np.abs(x - C.X_HAT_TNS_48K_10M)) < 1e-3

    return ok

def check():

    rng = np.random.default_rng(1234)
    ok = True

    for dt in range(T.NUM_DT):
        for sr in range(T.NUM_SRATE):
            ok = ok and check_analysis(rng, dt, sr)
            ok = ok and check_synthesis(rng, dt, sr)

    for dt in range(T.NUM_DT):
        ok = ok and check_analysis_appendix_c(dt)
        ok = ok and check_synthesis_appendix_c(dt)

    return ok

### ------------------------------------------------------------------------ ###
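
if __name__ == '__main__':
    # Optional standalone entry point: a small convenience sketch assuming
    # direct invocation of this module is wanted; the project's test runner
    # may instead import this module and call check() itself.
    import sys
    sys.exit(0 if check() else 1)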