Generalised electron-boson coupled cluster.
ebcc.cc.gebcc.GEBCC(mf, log=None, ansatz='CCSD', options=None, space=None, omega=None, g=None, G=None, mo_coeff=None, mo_occ=None, fock=None, **kwargs)
Bases: BaseEBCC
Generalised electron-boson coupled cluster.
Initialise the EBCC object.
Source code in ebcc/cc/base.py
def __init__(
self,
mf: SCF,
log: Optional[Logger] = None,
ansatz: Optional[Union[Ansatz, str]] = "CCSD",
options: Optional[BaseOptions] = None,
space: Optional[SpaceType] = None,
omega: Optional[NDArray[T]] = None,
g: Optional[NDArray[T]] = None,
G: Optional[NDArray[T]] = None,
mo_coeff: Optional[NDArray[T]] = None,
mo_occ: Optional[NDArray[T]] = None,
fock: Optional[BaseFock] = None,
**kwargs: Any,
) -> None:
r"""Initialise the EBCC object.
Args:
mf: PySCF mean-field object.
log: Log to write output to. Default is the global logger, outputting to `stderr`.
ansatz: Overall ansatz.
options: Options for the EBCC calculation.
space: Space containing the frozen, correlated, and active fermionic spaces. Default
assumes all electrons are correlated.
omega: Bosonic frequencies.
g: Electron-boson coupling matrix corresponding to the bosonic annihilation operator
:math:`g_{bpq} p^\dagger q b`. The creation part is assumed to be the fermionic
conjugate transpose to retain Hermiticity in the Hamiltonian.
G: Boson non-conserving term :math:`G_{b} (b^\dagger + b)`.
mo_coeff: Molecular orbital coefficients. Default is the mean-field coefficients.
mo_occ: Molecular orbital occupation numbers. Default is the mean-field occupation.
fock: Fock matrix. Default is the mean-field Fock matrix.
**kwargs: Additional keyword arguments used to update `options`.
"""
# Options:
if options is None:
options = self.Options()
self.options = options
for key, val in kwargs.items():
setattr(self.options, key, val)
# Parameters:
self.log = default_log if log is None else log
self.mf = self._convert_mf(mf)
self._mo_coeff: Optional[NDArray[T]] = (
np.asarray(mo_coeff, dtype=types[float]) if mo_coeff is not None else None
)
self._mo_occ: Optional[NDArray[T]] = (
np.asarray(mo_occ, dtype=types[float]) if mo_occ is not None else None
)
# Ansatz:
if isinstance(ansatz, Ansatz):
self.ansatz = ansatz
elif isinstance(ansatz, str):
self.ansatz = Ansatz.from_string(
ansatz, density_fitting=getattr(self.mf, "with_df", None) is not None
)
else:
raise TypeError("ansatz must be an Ansatz object or a string.")
self._eqns = self.ansatz._get_eqns(self.spin_type)
# Space:
if space is not None:
self.space = space
else:
self.space = self.init_space()
# Boson parameters:
if bool(self.fermion_coupling_rank) != bool(self.boson_coupling_rank):
raise ValueError(
"Fermionic and bosonic coupling ranks must both be zero, or both non-zero."
)
self.omega = np.asarray(omega, dtype=types[float]) if omega is not None else None
self.bare_g = np.asarray(g, dtype=types[float]) if g is not None else None
self.bare_G = np.asarray(G, dtype=types[float]) if G is not None else None
if self.boson_ansatz != "":
self.g = self.get_g()
self.G = self.get_mean_field_G()
else:
assert self.nbos == 0
self.options.shift = False
self.g = None
self.G = None
# Fock matrix:
if fock is None:
self.fock = self.get_fock()
else:
self.fock = fock
# Attributes:
self.e_corr = 0.0
self.amplitudes = util.Namespace()
self.converged = False
self.lambdas = util.Namespace()
self.converged_lambda = False
# Logging:
init_logging(self.log)
self.log.info(f"\n{ANSI.B}{ANSI.U}{self.name}{ANSI.R}")
self.log.debug(f"{ANSI.B}{'*' * len(self.name)}{ANSI.R}")
self.log.debug("")
self.log.info(f"{ANSI.B}Options{ANSI.R}:")
self.log.info(f" > e_tol: {ANSI.y}{self.options.e_tol}{ANSI.R}")
self.log.info(f" > t_tol: {ANSI.y}{self.options.t_tol}{ANSI.R}")
self.log.info(f" > max_iter: {ANSI.y}{self.options.max_iter}{ANSI.R}")
self.log.info(f" > diis_space: {ANSI.y}{self.options.diis_space}{ANSI.R}")
self.log.info(f" > diis_min_space: {ANSI.y}{self.options.diis_min_space}{ANSI.R}")
self.log.info(f" > damping: {ANSI.y}{self.options.damping}{ANSI.R}")
self.log.debug("")
self.log.info(f"{ANSI.B}Ansatz{ANSI.R}: {ANSI.m}{self.ansatz}{ANSI.R}")
self.log.debug("")
self.log.info(f"{ANSI.B}Space{ANSI.R}: {ANSI.m}{self.space}{ANSI.R}")
self.log.debug("")
if self.boson_ansatz != "":
self.log.info(f"{ANSI.B}Bosons{ANSI.R}: {ANSI.m}{self.nbos}{ANSI.R}")
self.log.info(" > Energy shift due to polaritonic basis: %.10f", self.const)
self.log.debug("")
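A minimal construction and usage sketch. The molecule, cavity frequency, and coupling matrix below are illustrative placeholders rather than physically meaningful parameters, and the `"CCSD-S-1-1"` ansatz string (CCSD with single bosonic excitations and single-fermion, single-boson coupling) is used as an example of a coupled ansatz.

import numpy as np
from pyscf import gto, scf
from ebcc import GEBCC

# Illustrative system: H2 in a minimal basis with a generalised (GHF) reference.
mol = gto.M(atom="H 0 0 0; H 0 0 0.75", basis="sto-3g", verbose=0)
mf = scf.GHF(mol).run()

# One bosonic mode with placeholder frequency and coupling values.
nbos = 1
nmo = mf.mo_coeff.shape[-1]
omega = np.asarray([0.5])             # bosonic frequency
g = np.ones((nbos, nmo, nmo)) * 0.01  # coupling g_{bpq} (annihilation part)

ccsd = GEBCC(mf, ansatz="CCSD-S-1-1", omega=omega, g=g)
ccsd.kernel()
print(ccsd.e_corr)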
ebcc.cc.gebcc.GEBCC.spin_type: str
property
Get a string representation of the spin type.
ebcc.cc.gebcc.GEBCC.xi: NDArray[T]
property
Get the shift in the bosonic operators to diagonalise the photon Hamiltonian.
ebcc.cc.gebcc.GEBCC.nmo: int
property
Get the number of molecular orbitals.
ebcc.cc.gebcc.GEBCC.nocc: int
property
Get the number of occupied molecular orbitals.
ebcc.cc.gebcc.GEBCC.nvir: int
property
Get the number of virtual molecular orbitals.
ebcc.cc.gebcc.GEBCC.ip_eom(**kwargs)
Get the IP-EOM object.
Source code in ebcc/cc/gebcc.py
def ip_eom(self, **kwargs: Any) -> IP_GEOM:
"""Get the IP-EOM object.
Args:
**kwargs: Additional keyword arguments.
Returns:
IP-EOM object.
"""
return IP_GEOM(self, **kwargs)
ebcc.cc.gebcc.GEBCC.ea_eom(**kwargs)
Get the EA-EOM object.
Source code in ebcc/cc/gebcc.py
def ea_eom(self, **kwargs: Any) -> EA_GEOM:
"""Get the EA-EOM object.
Args:
**kwargs: Additional keyword arguments.
Returns:
EA-EOM object.
"""
return EA_GEOM(self, **kwargs)
ebcc.cc.gebcc.GEBCC.ee_eom(**kwargs)
Get the EE-EOM object.
Source code in ebcc/cc/gebcc.py
def ee_eom(self, **kwargs: Any) -> EE_GEOM:
"""Get the EE-EOM object.
Args:
**kwargs: Additional keyword arguments.
Returns:
EE-EOM object.
"""
return EE_GEOM(self, **kwargs)
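The three EOM constructors are typically called on a converged ground-state object. A short sketch, reusing the `ccsd` object from the construction example above; the `nroots` keyword is assumed to be forwarded to the EOM solver options via `**kwargs`.

# Ionisation potentials, electron affinities, and excitation energies.
ip_energies = ccsd.ip_eom(nroots=3).kernel()
ea_energies = ccsd.ea_eom(nroots=3).kernel()
ee_energies = ccsd.ee_eom(nroots=3).kernel()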
ebcc.cc.gebcc.GEBCC.from_uebcc(ucc)
classmethod
Initialise a GEBCC object from a UEBCC object.
Source code in ebcc/cc/gebcc.py
@classmethod
def from_uebcc(cls, ucc: UEBCC) -> GEBCC:
"""Initialise a `GEBCC` object from an `UEBCC` object.
Args:
ucc: Unrestricted electron-boson coupled cluster object.
Returns:
GEBCC object.
"""
orbspin = np.asarray(scf.addons.get_ghf_orbspin(ucc.mf.mo_energy, ucc.mf.mo_occ, False))
nocc = ucc.space[0].nocc + ucc.space[1].nocc
nvir = ucc.space[0].nvir + ucc.space[1].nvir
nbos = ucc.nbos
sa = np.where(orbspin == 0)[0]
sb = np.where(orbspin == 1)[0]
occupied = np.zeros((nocc + nvir,), dtype=np.bool_)
occupied = _put(occupied, sa, np.copy(ucc.space[0]._occupied))
occupied = _put(occupied, sb, np.copy(ucc.space[1]._occupied))
frozen = np.zeros((nocc + nvir,), dtype=np.bool_)
frozen = _put(frozen, sa, np.copy(ucc.space[0]._frozen))
frozen = _put(frozen, sb, np.copy(ucc.space[1]._frozen))
active = np.zeros((nocc + nvir,), dtype=np.bool_)
active = _put(active, sa, np.copy(ucc.space[0]._active))
active = _put(active, sb, np.copy(ucc.space[1]._active))
space = Space(occupied, frozen, active)
slices = util.Namespace(
a=util.Namespace(**{k: np.where(orbspin[space.mask(k)] == 0)[0] for k in "oOivVa"}),
b=util.Namespace(**{k: np.where(orbspin[space.mask(k)] == 1)[0] for k in "oOivVa"}),
)
g: Optional[NDArray[T]] = None
if ucc.bare_g is not None:
if ucc.bare_g.ndim == 3:
bare_g_a = bare_g_b = ucc.bare_g
else:
bare_g_a, bare_g_b = ucc.bare_g
g = np.zeros((ucc.nbos, ucc.nmo * 2, ucc.nmo * 2), dtype=types[float])
g = _put(g, np.ix_(np.arange(ucc.nbos), sa, sa), np.copy(bare_g_a))
g = _put(g, np.ix_(np.arange(ucc.nbos), sb, sb), np.copy(bare_g_b))
gcc = cls(
ucc.mf,
log=ucc.log,
ansatz=ucc.ansatz,
space=space,
omega=ucc.omega,
g=g,
G=ucc.bare_G,
options=ucc.options,
)
gcc.e_corr = ucc.e_corr
gcc.converged = ucc.converged
gcc.converged_lambda = ucc.converged_lambda
has_amps = bool(ucc.amplitudes)
has_lams = bool(ucc.lambdas)
if has_amps:
amplitudes: Namespace[SpinArrayType] = util.Namespace()
for name, key, n in ucc.ansatz.fermionic_cluster_ranks(spin_type=ucc.spin_type):
shape = tuple(space.size(k) for k in key)
amplitudes[name] = np.zeros(shape, dtype=types[float])
for comb in util.generate_spin_combinations(n, unique=True):
done = set()
for lperm, lsign in util.permutations_with_signs(tuple(range(n))):
for uperm, usign in util.permutations_with_signs(tuple(range(n))):
combn = util.permute_string(comb[:n], lperm)
combn += util.permute_string(comb[n:], uperm)
if combn in done:
continue
mask = np.ix_(*[slices[s][k] for s, k in zip(combn, key)])
transpose = tuple(lperm) + tuple(p + n for p in uperm)
amp = (
np.transpose(getattr(ucc.amplitudes[name], comb), transpose)
* lsign
* usign
)
for perm, sign in util.permutations_with_signs(tuple(range(n))):
transpose = tuple(perm) + tuple(range(n, 2 * n))
if util.permute_string(comb[:n], perm) == comb[:n]:
amplitudes[name] = _put(
amplitudes[name],
mask,
amplitudes[name][mask]
+ np.transpose(amp, transpose) * sign,
)
done.add(combn)
for name, key, n in ucc.ansatz.bosonic_cluster_ranks(spin_type=ucc.spin_type):
amplitudes[name] = np.copy(ucc.amplitudes[name]) # type: ignore
for name, key, nf, nb in ucc.ansatz.coupling_cluster_ranks(spin_type=ucc.spin_type):
shape = (nbos,) * nb + tuple(space.size(k) for k in key[nb:])
amplitudes[name] = np.zeros(shape, dtype=types[float])
for comb in util.generate_spin_combinations(nf):
done = set()
for lperm, lsign in util.permutations_with_signs(tuple(range(nf))):
for uperm, usign in util.permutations_with_signs(tuple(range(nf))):
combn = util.permute_string(comb[:nf], lperm)
combn += util.permute_string(comb[nf:], uperm)
if combn in done:
continue
mask = np.ix_(
*([np.arange(nbos)] * nb),
*[slices[s][k] for s, k in zip(combn, key[nb:])],
)
transpose = (
tuple(range(nb))
+ tuple(p + nb for p in lperm)
+ tuple(p + nb + nf for p in uperm)
)
amp = (
np.transpose(getattr(ucc.amplitudes[name], comb), transpose)
* lsign
* usign
)
for perm, sign in util.permutations_with_signs(tuple(range(nf))):
transpose = (
tuple(range(nb))
+ tuple(p + nb for p in perm)
+ tuple(range(nb + nf, nb + 2 * nf))
)
if util.permute_string(comb[:nf], perm) == comb[:nf]:
amplitudes[name] = _put(
amplitudes[name],
mask,
amplitudes[name][mask]
+ np.transpose(amp, transpose) * sign,
)
done.add(combn)
gcc.amplitudes = amplitudes
if has_lams:
lambdas = gcc.init_lams() # Easier this way - but have to build ERIs...
for name, key, n in ucc.ansatz.fermionic_cluster_ranks(spin_type=ucc.spin_type):
lname = name.replace("t", "l")
shape = tuple(space.size(k) for k in key[n:] + key[:n])
lambdas[lname] = np.zeros(shape, dtype=types[float])
for comb in util.generate_spin_combinations(n, unique=True):
done = set()
for lperm, lsign in util.permutations_with_signs(tuple(range(n))):
for uperm, usign in util.permutations_with_signs(tuple(range(n))):
combn = util.permute_string(comb[:n], lperm)
combn += util.permute_string(comb[n:], uperm)
if combn in done:
continue
mask = np.ix_(*[slices[s][k] for s, k in zip(combn, key[n:] + key[:n])])
transpose = tuple(lperm) + tuple(p + n for p in uperm)
amp = (
np.transpose(getattr(ucc.lambdas[lname], comb), transpose)
* lsign
* usign
)
for perm, sign in util.permutations_with_signs(tuple(range(n))):
transpose = tuple(perm) + tuple(range(n, 2 * n))
if util.permute_string(comb[:n], perm) == comb[:n]:
lambdas[lname] = _put(
lambdas[lname],
mask,
lambdas[lname][mask] + np.transpose(amp, transpose) * sign,
)
done.add(combn)
for name, key, n in ucc.ansatz.bosonic_cluster_ranks(spin_type=ucc.spin_type):
lname = "l" + name
lambdas[lname] = np.copy(ucc.lambdas[lname]) # type: ignore
for name, key, nf, nb in ucc.ansatz.coupling_cluster_ranks(spin_type=ucc.spin_type):
lname = "l" + name
shape = (nbos,) * nb + tuple(
space.size(k) for k in key[nb + nf :] + key[nb : nb + nf]
)
lambdas[lname] = np.zeros(shape, dtype=types[float])
for comb in util.generate_spin_combinations(nf, unique=True):
done = set()
for lperm, lsign in util.permutations_with_signs(tuple(range(nf))):
for uperm, usign in util.permutations_with_signs(tuple(range(nf))):
combn = util.permute_string(comb[:nf], lperm)
combn += util.permute_string(comb[nf:], uperm)
if combn in done:
continue
mask = np.ix_(
*([np.arange(nbos)] * nb),
*[
slices[s][k]
for s, k in zip(combn, key[nb + nf :] + key[nb : nb + nf])
],
)
transpose = (
tuple(range(nb))
+ tuple(p + nb for p in lperm)
+ tuple(p + nb + nf for p in uperm)
)
amp = (
np.transpose(getattr(ucc.lambdas[lname], comb), transpose)
* lsign
* usign
)
for perm, sign in util.permutations_with_signs(tuple(range(nf))):
transpose = (
tuple(range(nb))
+ tuple(p + nb for p in perm)
+ tuple(range(nb + nf, nb + 2 * nf))
)
if util.permute_string(comb[:nf], perm) == comb[:nf]:
lambdas[lname] = _put(
lambdas[lname],
mask,
lambdas[lname][mask] + np.transpose(amp, transpose) * sign,
)
done.add(combn)
gcc.lambdas = lambdas
return gcc
ebcc.cc.gebcc.GEBCC.from_rebcc(rcc)
classmethod
Initialise a GEBCC object from an REBCC object.
Source code in ebcc/cc/gebcc.py
@classmethod
def from_rebcc(cls, rcc: REBCC) -> GEBCC:
"""Initialise a `GEBCC` object from an `REBCC` object.
Args:
rcc: Restricted electron-boson coupled cluster object.
Returns:
GEBCC object.
"""
from ebcc.cc.uebcc import UEBCC
ucc = UEBCC.from_rebcc(rcc)
gcc = cls.from_uebcc(ucc)
return gcc
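A brief sketch of the conversion path, assuming `mf_rhf` is a converged restricted PySCF mean-field object: a restricted calculation can be lifted into the generalised spin-orbital representation, carrying its correlation energy, convergence flags, and amplitudes across (via the `UEBCC` intermediate shown above).

from ebcc import REBCC, GEBCC

rcc = REBCC(mf_rhf, ansatz="CCSD")
rcc.kernel()

gcc = GEBCC.from_rebcc(rcc)
print(abs(gcc.e_corr - rcc.e_corr))  # the correlation energies should agree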
ebcc.cc.gebcc.GEBCC.init_space()
Initialise the fermionic space.
Source code in ebcc/cc/gebcc.py
def init_space(self) -> SpaceType:
"""Initialise the fermionic space.
Returns:
Fermionic space. All fermionic degrees of freedom are assumed to be correlated.
"""
space = Space(
self.mo_occ > 0,
np.zeros(self.mo_occ.shape, dtype=np.bool_),
np.zeros(self.mo_occ.shape, dtype=np.bool_),
)
return space
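To override this default, a `Space` can be built from three boolean masks (occupied, frozen, active) over the spin orbitals and passed to the constructor. A hedged sketch, assuming `Space` is importable from the top level of the package and that `mf` is a generalised mean-field so the masks match the spin-orbital dimension:

import numpy as np
from ebcc import GEBCC, Space

occupied = mf.mo_occ > 0
frozen = np.zeros_like(occupied)
frozen[:2] = True                 # e.g. freeze the two lowest spin orbitals
active = np.zeros_like(occupied)  # no explicitly active subset

space = Space(occupied, frozen, active)
ccsd = GEBCC(mf, ansatz="CCSD", space=space)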
ebcc.cc.gebcc.GEBCC.init_amps(eris=None)
Initialise the cluster amplitudes.
Source code in ebcc/cc/gebcc.py
def init_amps(self, eris: Optional[ERIsInputType] = None) -> Namespace[SpinArrayType]:
"""Initialise the cluster amplitudes.
Args:
eris: Electron repulsion integrals.
Returns:
Initial cluster amplitudes.
"""
eris = self.get_eris(eris)
amplitudes: Namespace[SpinArrayType] = util.Namespace()
# Build T amplitudes:
for name, key, n in self.ansatz.fermionic_cluster_ranks(spin_type=self.spin_type):
if n == 1:
amplitudes[name] = getattr(self.fock, key) / self.energy_sum(key)
elif n == 2:
amplitudes[name] = getattr(eris, key) / self.energy_sum(key)
else:
shape = tuple(self.space.size(k) for k in key)
amplitudes[name] = np.zeros(shape, dtype=types[float])
# Build S amplitudes:
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type):
if self.omega is None or self.G is None:
raise ValueError("Bosonic parameters not set.")
if n == 1:
amplitudes[name] = -self.G / self.omega
else:
shape = (self.nbos,) * n
amplitudes[name] = np.zeros(shape, dtype=types[float])
# Build U amplitudes:
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(spin_type=self.spin_type):
if self.omega is None or self.g is None:
raise ValueError("Bosonic parameters not set.")
if nf != 1:
raise util.ModelNotImplemented
if nb == 1:
amplitudes[name] = self.g[key] / self.energy_sum(key)
else:
shape = (self.nbos,) * nb + tuple(self.space.size(k) for k in key[nb:])
amplitudes[name] = np.zeros(shape, dtype=types[float])
return amplitudes
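In equation form, the initial guesses assembled above are the usual first-order estimates, with denominators taken from `energy_sum` (occupied orbital energies enter with a positive sign, virtual orbital energies and bosonic frequencies with a negative sign):

.. math::
    t_i^a = \frac{f_{ia}}{\Delta_i^a}, \qquad
    t_{ij}^{ab} = \frac{\langle ij \| ab \rangle}{\Delta_{ij}^{ab}}, \qquad
    s_I = -\frac{G_I}{\omega_I}, \qquad
    u_{I,i}^{a} = \frac{g_{I,ia}}{\Delta_{I,i}^{a}}

where, for example, :math:`\Delta_{ij}^{ab} = \varepsilon_i + \varepsilon_j - \varepsilon_a - \varepsilon_b` and :math:`\Delta_{I,i}^{a} = \varepsilon_i - \varepsilon_a - \omega_I`. Higher-rank amplitudes are initialised to zero.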
ebcc.cc.gebcc.GEBCC.init_lams(amplitudes=None)
Initialise the cluster lambda amplitudes.
Source code in ebcc/cc/gebcc.py
def init_lams(
self, amplitudes: Optional[Namespace[SpinArrayType]] = None
) -> Namespace[SpinArrayType]:
"""Initialise the cluster lambda amplitudes.
Args:
amplitudes: Cluster amplitudes.
Returns:
Initial cluster lambda amplitudes.
"""
if not amplitudes:
amplitudes = self.amplitudes
lambdas: Namespace[SpinArrayType] = util.Namespace()
# Build L amplitudes:
for name, key, n in self.ansatz.fermionic_cluster_ranks(spin_type=self.spin_type):
lname = name.replace("t", "l")
perm = list(range(n, 2 * n)) + list(range(n))
lambdas[lname] = np.transpose(amplitudes[name], perm)
# Build LS amplitudes:
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type):
lname = "l" + name
lambdas[lname] = amplitudes[name]
# Build LU amplitudes:
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(spin_type=self.spin_type):
if nf != 1:
raise util.ModelNotImplemented
lname = "l" + name
perm = list(range(nb)) + [nb + 1, nb]
lambdas[lname] = np.transpose(amplitudes[name], perm)
return lambdas
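Equivalently, the lambda amplitudes are seeded with the approximate adjoints of the corresponding T amplitudes, i.e. the same tensors with bra and ket fermionic indices exchanged,

.. math::
    \lambda_{ab\cdots}^{ij\cdots} \leftarrow t_{ij\cdots}^{ab\cdots}

with purely bosonic amplitudes copied unchanged and the coupling amplitudes transposed only in their fermionic indices.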
ebcc.cc.gebcc.GEBCC.update_amps(eris=None, amplitudes=None)
Update the cluster amplitudes.
Source code in ebcc/cc/gebcc.py
def update_amps(
self,
eris: Optional[ERIsInputType] = None,
amplitudes: Optional[Namespace[SpinArrayType]] = None,
) -> Namespace[SpinArrayType]:
"""Update the cluster amplitudes.
Args:
eris: Electron repulsion integrals.
amplitudes: Cluster amplitudes.
Returns:
Updated cluster amplitudes.
"""
amplitudes = self._get_amps(amplitudes=amplitudes)
func, kwargs = self._load_function(
"update_amps",
eris=eris,
amplitudes=amplitudes,
)
res: Namespace[SpinArrayType] = func(**kwargs)
res = util.Namespace(**{key.rstrip("new"): val for key, val in res.items()})
# Divide T amplitudes:
for name, key, n in self.ansatz.fermionic_cluster_ranks(spin_type=self.spin_type):
res[name] /= self.energy_sum(key)
res[name] += amplitudes[name]
# Divide S amplitudes:
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type):
res[name] /= self.energy_sum(key)
res[name] += amplitudes[name]
# Divide U amplitudes:
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(spin_type=self.spin_type):
if nf != 1:
raise util.ModelNotImplemented
res[name] /= self.energy_sum(key)
res[name] += amplitudes[name]
return res
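The update is therefore a Jacobi-type quasi-Newton step: the residual returned by the generated equations is scaled by the orbital-energy (and bosonic-frequency) denominator and added to the current amplitudes,

.. math::
    t \leftarrow t + \frac{R(t)}{\Delta}

with convergence acceleration (DIIS, damping), controlled by the options logged at construction, applied to the packed amplitude vector by the driver rather than inside this routine.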
ebcc.cc.gebcc.GEBCC.update_lams(eris=None, amplitudes=None, lambdas=None, lambdas_pert=None, perturbative=False)
Update the cluster lambda amplitudes.
Source code in ebcc/cc/gebcc.py
def update_lams(
self,
eris: Optional[ERIsInputType] = None,
amplitudes: Optional[Namespace[SpinArrayType]] = None,
lambdas: Optional[Namespace[SpinArrayType]] = None,
lambdas_pert: Optional[Namespace[SpinArrayType]] = None,
perturbative: bool = False,
) -> Namespace[SpinArrayType]:
"""Update the cluster lambda amplitudes.
Args:
eris: Electron repulsion integrals.
amplitudes: Cluster amplitudes.
lambdas: Cluster lambda amplitudes.
lambdas_pert: Perturbative cluster lambda amplitudes.
perturbative: Flag to include perturbative correction.
Returns:
Updated cluster lambda amplitudes.
"""
# TODO active
amplitudes = self._get_amps(amplitudes=amplitudes)
lambdas = self._get_lams(lambdas=lambdas, amplitudes=amplitudes)
if lambdas_pert is not None:
lambdas.update(lambdas_pert)
func, kwargs = self._load_function(
"update_lams%s" % ("_perturbative" if perturbative else ""),
eris=eris,
amplitudes=amplitudes,
lambdas=lambdas,
)
res: Namespace[SpinArrayType] = func(**kwargs)
res = util.Namespace(**{key.rstrip("new"): val for key, val in res.items()})
# Divide T amplitudes:
for name, key, n in self.ansatz.fermionic_cluster_ranks(
spin_type=self.spin_type, which="l"
):
res[name] /= self.energy_sum(key)
if not perturbative:
res[name] += lambdas[name]
# Divide S amplitudes:
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type, which="l"):
res[name] /= self.energy_sum(key)
if not perturbative:
res[name] += lambdas[name]
# Divide U amplitudes:
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(
spin_type=self.spin_type, which="l"
):
if nf != 1:
raise util.ModelNotImplemented
res[name] /= self.energy_sum(key)
if not perturbative:
res[name] += lambdas[name]
if perturbative:
res = Namespace(**{key + "pert": val for key, val in res.items()})
return res
ebcc.cc.gebcc.GEBCC.make_rdm1_f(eris=None, amplitudes=None, lambdas=None, hermitise=True)
Make the one-particle fermionic reduced density matrix :math:`\langle i^+ j \rangle`.
Source code in ebcc/cc/gebcc.py
def make_rdm1_f(
self,
eris: Optional[ERIsInputType] = None,
amplitudes: Optional[Namespace[SpinArrayType]] = None,
lambdas: Optional[Namespace[SpinArrayType]] = None,
hermitise: bool = True,
) -> SpinArrayType:
r"""Make the one-particle fermionic reduced density matrix :math:`\langle i^+ j \rangle`.
Args:
eris: Electron repulsion integrals.
amplitudes: Cluster amplitudes.
lambdas: Cluster lambda amplitudes.
hermitise: Hermitise the density matrix.
Returns:
One-particle fermion reduced density matrix.
"""
func, kwargs = self._load_function(
"make_rdm1_f",
eris=eris,
amplitudes=amplitudes,
lambdas=lambdas,
)
dm: SpinArrayType = func(**kwargs)
if hermitise:
dm = (dm + np.transpose(dm)) * 0.5
return dm
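A brief usage sketch, reusing the converged `ccsd` object from the earlier example. The lambda amplitudes enter the density matrices, so they are solved first (otherwise the initial-guess lambdas are used); the trace provides a simple sanity check.

import numpy as np

ccsd.solve_lambda()
dm1 = ccsd.make_rdm1_f()   # spin-orbital basis of the correlated space
print(np.trace(dm1))       # approximately the number of correlated electrons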
ebcc.cc.gebcc.GEBCC.make_rdm2_f(eris=None, amplitudes=None, lambdas=None, hermitise=True)
Make the two-particle fermionic reduced density matrix :math:`\langle i^+j^+lk \rangle`.
Source code in ebcc/cc/gebcc.py
def make_rdm2_f(
self,
eris: Optional[ERIsInputType] = None,
amplitudes: Optional[Namespace[SpinArrayType]] = None,
lambdas: Optional[Namespace[SpinArrayType]] = None,
hermitise: bool = True,
) -> SpinArrayType:
r"""Make the two-particle fermionic reduced density matrix :math:`\langle i^+j^+lk \rangle`.
Args:
eris: Electron repulsion integrals.
amplitudes: Cluster amplitudes.
lambdas: Cluster lambda amplitudes.
hermitise: Hermitise the density matrix.
Returns:
Two-particle fermion reduced density matrix.
"""
func, kwargs = self._load_function(
"make_rdm2_f",
eris=eris,
amplitudes=amplitudes,
lambdas=lambdas,
)
dm: SpinArrayType = func(**kwargs)
if hermitise:
dm = (np.transpose(dm, (0, 1, 2, 3)) + np.transpose(dm, (2, 3, 0, 1))) * 0.5
dm = (np.transpose(dm, (0, 1, 2, 3)) + np.transpose(dm, (1, 0, 3, 2))) * 0.5
return dm
ebcc.cc.gebcc.GEBCC.make_eb_coup_rdm(eris=None, amplitudes=None, lambdas=None, unshifted=True, hermitise=True)
Make the electron-boson coupling reduced density matrix, :math:`\langle b^+ i^+ j \rangle` and :math:`\langle b i^+ j \rangle`.
Source code in ebcc/cc/gebcc.py
def make_eb_coup_rdm(
self,
eris: Optional[ERIsInputType] = None,
amplitudes: Optional[Namespace[SpinArrayType]] = None,
lambdas: Optional[Namespace[SpinArrayType]] = None,
unshifted: bool = True,
hermitise: bool = True,
) -> SpinArrayType:
r"""Make the electron-boson coupling reduced density matrix.
.. math::
\langle b^+ i^+ j \rangle
and
.. math::
\langle b i^+ j \rangle
Args:
eris: Electron repulsion integrals.
amplitudes: Cluster amplitudes.
lambdas: Cluster lambda amplitudes.
unshifted: If `self.options.shift` is `True`, return the unshifted density matrix. Has
no effect if `self.options.shift` is `False`.
hermitise: Hermitise the density matrix.
Returns:
Electron-boson coupling reduced density matrix.
"""
func, kwargs = self._load_function(
"make_eb_coup_rdm",
eris=eris,
amplitudes=amplitudes,
lambdas=lambdas,
)
dm_eb: SpinArrayType = func(**kwargs)
if hermitise:
dm_eb = np.array(
[
(dm_eb[0] + np.transpose(dm_eb[1], (0, 2, 1))) * 0.5,
(dm_eb[1] + np.transpose(dm_eb[0], (0, 2, 1))) * 0.5,
]
)
if unshifted and self.options.shift:
rdm1_f = self.make_rdm1_f(hermitise=hermitise)
shift = util.einsum("x,ij->xij", self.xi, rdm1_f)
dm_eb -= shift[None]
return dm_eb
ebcc.cc.gebcc.GEBCC.energy_sum(*args, signs_dict=None)
Get a direct sum of energies.
Source code in ebcc/cc/gebcc.py
def energy_sum(self, *args: str, signs_dict: Optional[dict[str, str]] = None) -> NDArray[T]:
"""Get a direct sum of energies.
Args:
*args: Energies to sum. Should specify a subscript only.
signs_dict: Signs of the energies in the sum. Default sets `("o", "O", "i")` to be
positive, and `("v", "V", "a", "b")` to be negative.
Returns:
Sum of energies.
"""
(subscript,) = args
n = 0
def next_char() -> str:
nonlocal n
if n < 26:
char = chr(ord("a") + n)
else:
char = chr(ord("A") + n)
n += 1
return char
if signs_dict is None:
signs_dict = {}
for k, s in zip("vVaoOib", "---+++-"):
if k not in signs_dict:
signs_dict[k] = s
energies = []
for key in subscript:
factor = 1 if signs_dict[key] == "+" else -1
if key == "b":
assert self.omega is not None
energies.append(self.omega * types[float](factor))
else:
energies.append(np.diag(self.fock[key + key]) * types[float](factor))
subscript = ",".join([next_char() for k in subscript])
energy_sum = util.dirsum(subscript, *energies)
return energy_sum
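For example, the conventional doubles denominator and its electron-boson analogue can be built directly from the amplitude subscripts (a sketch, reusing the `ccsd` object from above):

# e_i + e_j - e_a - e_b with shape (nocc, nocc, nvir, nvir), as used for the T2 guess.
d_oovv = ccsd.energy_sum("oovv")

# A "b" label contributes -omega along that axis, e.g. for the single-boson,
# single-excitation coupling amplitude.
d_bov = ccsd.energy_sum("bov")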
ebcc.cc.gebcc.GEBCC.amplitudes_to_vector(amplitudes)
Construct a vector containing all of the amplitudes used in the given ansatz.
Source code in ebcc/cc/gebcc.py
def amplitudes_to_vector(self, amplitudes: Namespace[SpinArrayType]) -> NDArray[T]:
"""Construct a vector containing all of the amplitudes used in the given ansatz.
Args:
amplitudes: Cluster amplitudes.
Returns:
Cluster amplitudes as a vector.
"""
vectors = []
for name, key, n in self.ansatz.fermionic_cluster_ranks(spin_type=self.spin_type):
vectors.append(np.ravel(amplitudes[name]))
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type):
vectors.append(np.ravel(amplitudes[name]))
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(spin_type=self.spin_type):
vectors.append(np.ravel(amplitudes[name]))
return np.concatenate(vectors)
ebcc.cc.gebcc.GEBCC.vector_to_amplitudes(vector)
Construct a namespace of amplitudes from a vector.
Source code in ebcc/cc/gebcc.py
def vector_to_amplitudes(self, vector: NDArray[T]) -> Namespace[SpinArrayType]:
"""Construct a namespace of amplitudes from a vector.
Args:
vector: Cluster amplitudes as a vector.
Returns:
Cluster amplitudes.
"""
amplitudes: Namespace[SpinArrayType] = util.Namespace()
i0 = 0
for name, key, n in self.ansatz.fermionic_cluster_ranks(spin_type=self.spin_type):
shape = tuple(self.space.size(k) for k in key)
size = util.prod(shape)
amplitudes[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type):
shape = (self.nbos,) * n
size = util.prod(shape)
amplitudes[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(spin_type=self.spin_type):
shape = (self.nbos,) * nb + tuple(self.space.size(k) for k in key[nb:])
size = util.prod(shape)
amplitudes[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
return amplitudes
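These two routines are exact inverses of one another, which the solver relies on when packing amplitudes (for example for DIIS extrapolation). A quick round-trip check (sketch):

import numpy as np

vector = ccsd.amplitudes_to_vector(ccsd.amplitudes)
amps = ccsd.vector_to_amplitudes(vector)
assert np.allclose(ccsd.amplitudes_to_vector(amps), vector)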
ebcc.cc.gebcc.GEBCC.lambdas_to_vector(lambdas)
Construct a vector containing all of the lambda amplitudes used in the given ansatz.
Source code in ebcc/cc/gebcc.py
def lambdas_to_vector(self, lambdas: Namespace[SpinArrayType]) -> NDArray[T]:
"""Construct a vector containing all of the lambda amplitudes used in the given ansatz.
Args:
lambdas: Cluster lambda amplitudes.
Returns:
Cluster lambda amplitudes as a vector.
"""
vectors = []
for name, key, n in self.ansatz.fermionic_cluster_ranks(
spin_type=self.spin_type, which="l"
):
vectors.append(np.ravel(lambdas[name]))
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type, which="l"):
vectors.append(np.ravel(lambdas[name]))
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(
spin_type=self.spin_type, which="l"
):
vectors.append(np.ravel(lambdas[name]))
return np.concatenate(vectors)
ebcc.cc.gebcc.GEBCC.vector_to_lambdas(vector)
Construct a namespace of lambda amplitudes from a vector.
Source code in ebcc/cc/gebcc.py
def vector_to_lambdas(self, vector: NDArray[T]) -> Namespace[SpinArrayType]:
"""Construct a namespace of lambda amplitudes from a vector.
Args:
vector: Cluster lambda amplitudes as a vector.
Returns:
Cluster lambda amplitudes.
"""
lambdas: Namespace[SpinArrayType] = util.Namespace()
i0 = 0
for name, key, n in self.ansatz.fermionic_cluster_ranks(
spin_type=self.spin_type, which="l"
):
shape = tuple(self.space.size(k) for k in key)
size = util.prod(shape)
lambdas[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
for name, key, n in self.ansatz.bosonic_cluster_ranks(spin_type=self.spin_type, which="l"):
shape = (self.nbos,) * n
size = util.prod(shape)
lambdas[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
for name, key, nf, nb in self.ansatz.coupling_cluster_ranks(
spin_type=self.spin_type, which="l"
):
shape = (self.nbos,) * nb + tuple(self.space.size(k) for k in key[nb:])
size = util.prod(shape)
lambdas[name] = np.reshape(vector[i0 : i0 + size], shape)
i0 += size
return lambdas
ebcc.cc.gebcc.GEBCC.get_mean_field_G()
Get the mean-field boson non-conserving term.
Source code in ebcc/cc/gebcc.py
def get_mean_field_G(self) -> NDArray[T]:
"""Get the mean-field boson non-conserving term.
Returns:
Mean-field boson non-conserving term.
"""
assert self.g is not None
assert self.omega is not None
# FIXME should this also sum in frozen orbitals?
boo: NDArray[T] = self.g.boo
val = util.einsum("Ipp->I", boo)
val -= self.xi * self.omega
if self.bare_G is not None:
val += self.bare_G
return val
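In equation form, the quantity assembled above is

.. math::
    G_I^{\mathrm{MF}} = \sum_{i \in \mathrm{occ}} g_{I,ii} - \xi_I \omega_I + G_I

i.e. the occupied-occupied trace of the coupling, minus the polaritonic-basis shift :math:`\xi_I \omega_I`, plus the bare boson non-conserving term when one was provided.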