'''
Computation of optimal quantized linear prediction coefficients.
Given a block of samples, a prediction order and an LP coefficient precision
("denominator") we can compute the optimal quantized prediction coefficients
("optimal" in the sense of the lowest RMS of the prediction error) by
solving a certain integer-least-squares (ILS) problem. The solution is
computed using a QR factorization followed by a branch-and-bound algorithm to
solve an "upper-triangular" equation system for integer coefficients in a
least squares sense.
Copyright (c) 2020 Sebastian Gesemann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import scipy.linalg as la
import numpy as np
def block2r(block, lpc_max_ord):
    '''Builds an upper triangular matrix "r" representing a reduced
    equation system for the least squares problem of predicting each
    sample from its lpc_max_ord predecessors. The right-hand side of
    the equation system is stored as the last column, so for
    lpc_max_ord=4 the result is a 5x5 matrix.'''
    blen = len(block)
    nrows = blen - lpc_max_ord
    lagged = np.empty((nrows, lpc_max_ord + 1))
    # column k holds the samples delayed by k+1 positions
    for lag in range(lpc_max_ord):
        lagged[:, lag] = block[lpc_max_ord - 1 - lag : blen - 1 - lag]
    # the samples to be predicted go into the extra last column
    # (the right-hand side of the equation system)
    lagged[:, -1] = block[lpc_max_ord:]
    (rmat,) = la.qr(lagged, mode='r')
    return rmat[:lpc_max_ord + 1, :] / np.sqrt(nrows)
def estimate_rms_of_prediction_error(r, lpc_order):
    '''Estimates the prediction error RMS for a given r matrix and
    lpc_order. Quantization of the prediction coefficients is not
    accounted for, so this is a lower bound assuming ideal
    (real-valued) coefficients.'''
    residual = r[lpc_order:, -1]
    return la.norm(residual)
def reduce_by(r, count):
    '''Takes an r matrix and returns the corresponding one for a
    prediction order lowered by count.'''
    keep = r.shape[0] - 1 - count  # size of the remaining triangle
    out = np.zeros((keep + 1, r.shape[1] - count))
    out[:keep, :keep] = r[:keep, :keep]
    out[:keep, -1] = r[:keep, -1]
    # collapse the dropped rows of the right-hand side into a single
    # residual entry so that the overall error norm is preserved
    out[-1, -1] = la.norm(r[keep:, -1])
    return out
def reduce_to(r, count):
    '''Takes an r matrix and returns the corresponding one for a
    prediction order of exactly count.'''
    current_order = r.shape[0] - 1
    return reduce_by(r, current_order - count)
def integers_sorted_by_magnitude(positive_first = True):
    '''Generates 0, 1, -1, 2, -2, 3, -3, ... when positive_first is
    True, and 0, -1, 1, -2, 2, ... when it is False.'''
    yield 0
    first_sign = 1 if positive_first else -1
    magnitude = 1
    while True:
        yield magnitude * first_sign
        yield -magnitude * first_sign
        magnitude += 1
def quantization_search(r, scale):
    '''Branch-and-bound search for the integer LP coefficient
    numerators (over the common denominator "scale") that minimize
    the sum of squared prediction errors for the upper triangular
    system r (as produced by block2r / reduce_to, last column = RHS).
    Returns a list [rms, coefs] where rms is the expected RMS of the
    prediction error including quantization effects and coefs is an
    int32 array with the optimal numerators.'''
    n = r.shape[0] - 1  # prediction order
    work = np.zeros((n,), dtype=np.int32)  # current partial candidate solution
    best = [np.inf, None] # remember the best solution so far
    zero = max(np.abs(r[0,0]), la.norm(r[:,-1])) * np.finfo(r.dtype).eps * np.sqrt(r.shape[0] * 500)
    # any matrix coefficient with a magnitude equal to or below "zero"
    # can be considered to be zero due to numerical rounding errors.
    def recurse(i, sum_or_squared_errors_so_far):
        '''search for LP coefficients work[k] with k=0,1,...,i;
        returns False once the accumulated error can no longer beat
        the best solution found so far (pruning signal for callers)'''
        if best[0] <= sum_or_squared_errors_so_far:
            return False # we cannot improve in this branch, so, abort
        if i == -1: # leaf node of the decision tree = no further decision to make
            if sum_or_squared_errors_so_far < best[0]:
                # new best solution! :-)
                best[0] = sum_or_squared_errors_so_far
                best[1] = np.copy(work)
            return True
        rii = r[i,i]
        # residual right-hand side for row i after accounting for the
        # already fixed coefficients work[i+1:]
        rhs = r[i,-1] * scale - np.dot(r[i,i+1:-1],work[i+1:])
        if np.abs(rii) <= zero:
            # column i seems to be a linear combination of the previous
            # columns (0...i-1) and thus does not add anything "new"
            # By setting this coefficient to zero and moving on we avoid
            # a "division by zero" problem at no cost.
            work[i] = 0.0
            return recurse(i - 1, sum_or_squared_errors_so_far + rhs**2)
        else:
            u = rhs / rii # (unquantized) ideal coefficient
            q = np.round(u) # quantized to closest integer
            # try candidate integers ordered by (roughly) increasing
            # per-level error; the bound check at the top of recurse
            # eventually terminates this otherwise endless enumeration
            for offset in integers_sorted_by_magnitude(positive_first = u > q):
                work[i] = q + offset
                err = rhs - rii * work[i]
                if not recurse(i - 1, sum_or_squared_errors_so_far + err**2):
                    # we cannot improve by going further, abort
                    return False
    recurse(n - 1, 0.0)
    # include the part of the error no coefficient choice can reduce
    # (rows below the prediction order) and convert the sum of squares
    # back to an RMS in the original sample scale
    perr = r[-1,-1] * scale
    best[0] = np.sqrt(best[0] + perr**2) / scale
    return best
# --- demonstration on a small sample block ---
demo_block = np.array([1, 2, 3, 2, 3, 4, 5, 4, 3, 2, 3, 2, 1, 2, 3, 4, 3, 2, 1])
lpc_max_ord = 4
rfull = block2r(demo_block, lpc_max_ord)
print('RMS of prediction errors based on ideal coefficients...')
for order in range(lpc_max_ord + 1):
    rmse = estimate_rms_of_prediction_error(rfull, order)
    print(f'LPC order = {order} ==> RMS = {rmse}')
o = 3  # pick a prediction order
denominator = 16
print(f'using o = {o} and denominator = {denominator}')
r = reduce_to(rfull, o)
print(r)
# ideal (real-valued) coefficients via back substitution, for comparison
real = la.solve_triangular(r[:-1, :-1], r[:-1, -1] * denominator)
print(f'real-valued LP coefs = {real} / {denominator}')
best = quantization_search(r, denominator)
print(f'optimal quantization = {best[1]} / {denominator}')
print(f'expected RMS with quantized LP coefs = {best[0]}')