@techreport{66,
  title       = {Optimization and Analysis of Distributed Averaging with Short Node Memory},
  author      = {Oreshkin, Boris N. and Coates, Mark J. and Rabbat, Michael G.},
  institution = {Department of Electrical and Computer Engineering, McGill University},
  type        = {Technical Report},
  address     = {Montreal, QC, Canada},
  year        = {2009},
  month       = mar,
  url         = {http://networks.ece.mcgill.ca/sites/default/files/oreshkin_TechReport09.pdf},
  keywords    = {average consensus, distributed signal processing, linear prediction},
  abstract    = {Distributed averaging describes a class of network algorithms for the decentralized computation of aggregate
statistics. Initially, each node has a scalar data value, and the goal is to compute the average of these values at every
node (the so-called average consensus problem). Nodes iteratively exchange information with their neighbors and
perform local updates until the value at every node converges to the initial network average. Much previous work
has focused on algorithms where each node maintains and updates a single value; every time an update is performed,
the previous value is forgotten. Convergence to the average consensus is achieved asymptotically. The convergence
rate is fundamentally limited by network connectivity, and it can be prohibitively slow on topologies such as grids
and random geometric graphs, even if the update rules are optimized. In this paper, we provide the first theoretical
demonstration that adding a local prediction component to the update rule can significantly improve the convergence
rate of distributed averaging algorithms. We focus on the case where the local predictor is a linear combination of
the node{\textquoteright}s current and previous values (i.e., two memory taps), and our update rule computes a combination of the
predictor and the usual weighted linear combination of values received from neighbouring nodes. We derive the
optimal mixing parameter for combining the predictor with the neighbors{\textquoteright} values, and conduct a theoretical analysis
of the improvement in convergence rate that can be achieved using this acceleration methodology. For a chain
topology on {$N$} nodes, this leads to a factor of {$N$} improvement over standard consensus, and for a two-dimensional
grid, our approach achieves a factor of {$\sqrt{N}$} improvement, in terms of the number of iterations required to reach a
prescribed level of accuracy.},
}