MADNESS  0.10.1
libxc.h
/*
  This file is part of MADNESS.

  Copyright (C) 2007,2010 Oak Ridge National Laboratory

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  For more information please contact:

  Robert J. Harrison
  Oak Ridge National Laboratory
  One Bethel Valley Road
  P.O. Box 2008, MS-6367

  email: harrisonrj@ornl.gov
  tel:   865-241-3937
  fax:   865-572-0680
*/

/*
 * libxc.h
 *
 *  Created on: Nov 23, 2008
 *      Author: wsttiger
 */

#ifndef LIBXC_H_
#define LIBXC_H_

#include <madness/mra/mra.h>
#include <madness/world/MADworld.h>
//#include "xc.h"   // libxc proper; needed only by the commented-out code below
#include "lda.h"

// The operators below use Key and Tensor unqualified.
using madness::Key;
using madness::Tensor;

//***************************************************************************
// Clamp the density away from zero so the LDA expressions below never see a
// vanishing (or negative) argument.
static double munge(double r) {
  if (r < 1e-15) r = 2e-15;
  return r;
}
//***************************************************************************

//***************************************************************************
// In-place LDA exchange-correlation potential: doubles the input value (the
// spin density of a closed-shell system) and overwrites it with the Slater
// exchange plus VWN5 correlation potential from lda.h.
template <typename T>
inline static void ldaop(const Key<3>& key, Tensor<T>& t) {
  UNARY_OPTIMIZED_ITERATOR(T, t,
      double r = munge(2.0 * *_p0);   // total density from the spin density
      double q; double dq1; double dq2;
      x_rks_s__(&r, &q, &dq1);        // Slater exchange: potential in dq1
      c_rks_vwn5__(&r, &q, &dq2);     // VWN5 correlation: potential in dq2
      *_p0 = dq1 + dq2);
}
//***************************************************************************
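
//***************************************************************************
// Usage sketch (an added illustration, not part of the original interface):
// ldaop is designed to be applied in place to the values of a MADNESS
// Function via Function::unaryop().  The helper name and the argument
// rho_alpha below are assumptions made for this example.
inline static madness::Function<double,3>
example_lda_potential(const madness::Function<double,3>& rho_alpha) {
  madness::Function<double,3> vxc = madness::copy(rho_alpha);
  vxc.reconstruct();             // unaryop operates on function values
  vxc.unaryop(&ldaop<double>);   // in place: rho -> v_xc(rho)
  return vxc;
}
//***************************************************************************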

//***************************************************************************
// In-place LDA exchange-correlation energy density: same doubling and
// munging as ldaop, but keeps the functional values q1+q2 instead of the
// derivatives.
template <typename T>
inline static void ldaeop(const Key<3>& key, Tensor<T>& t) {
  UNARY_OPTIMIZED_ITERATOR(T, t,
      double r = munge(2.0 * *_p0);
      double q1; double q2; double dq;
      x_rks_s__(&r, &q1, &dq);        // Slater exchange: energy density in q1
      c_rks_vwn5__(&r, &q2, &dq);     // VWN5 correlation: energy density in q2
      *_p0 = q1 + q2);
}
//***************************************************************************
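
//***************************************************************************
// Companion sketch (also an added illustration): treating the values written
// by ldaeop as the XC energy density per unit volume, the total LDA XC
// energy is its integral, which Function::trace() evaluates.
inline static double
example_lda_energy(const madness::Function<double,3>& rho_alpha) {
  madness::Function<double,3> exc = madness::copy(rho_alpha);
  exc.reconstruct();
  exc.unaryop(&ldaeop<double>);   // in place: rho -> e_xc(rho)
  return exc.trace();             // E_xc = integral of e_xc over the cell
}
//***************************************************************************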

////***************************************************************************
//template <typename T>
//inline static void libxc_ldaop(const Key<3>& key, Tensor<T>& t) {
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  UNARY_OPTIMIZED_ITERATOR(T, t, double r=munge(2.0* *_p0); double q; double dq1; double dq2;
//      xc_lda_vxc(&xc_x_func, &r, &q, &dq1); xc_lda_vxc(&xc_c_func, &r, &q, &dq2);
//      *_p0 = dq1+dq2);
//}
////***************************************************************************

////***************************************************************************
//template <typename T>
//inline static void libxc_ldaop_sp(const Key<3>& key, Tensor<T>& t, Tensor<T>& a, Tensor<T>& b)
//{
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_POLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_POLARIZED, 3, 0);
//  TERNARY_OPTIMIZED_ITERATOR(T, t, T, a, T, b, double r[2]; r[0] = munge(*_p1);
//      r[1] = munge(*_p2); double q[2]; double dq1[2]; double dq2[2];
//      xc_lda_vxc(&xc_x_func, &r[0], &q[0], &dq1[0]); xc_lda_vxc(&xc_c_func, &r[0], &q[0], &dq2[0]);
//      *_p0 = dq1[0]+dq2[0]);
//}
////***************************************************************************

////***************************************************************************
//template <typename T>
//inline static void libxc_ldaeop_sp(const Key<3>& key, Tensor<T>& t, Tensor<T>& a, Tensor<T>& b)
//{
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_POLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_POLARIZED, 3, 0);
//  TERNARY_OPTIMIZED_ITERATOR(T, t, T, a, T, b, double r[2]; r[0] = munge(*_p1);
//      r[1] = munge(*_p2); double q1[2]; double q2[2]; double dq[2];
//      xc_lda_vxc(&xc_x_func, &r[0], &q1[0], &dq[0]); xc_lda_vxc(&xc_c_func, &r[0], &q2[0], &dq[0]);
//      *_p0 = q1[0]+q2[0]);
//}
////***************************************************************************

////***************************************************************************
//// Note: despite the _sp suffix, this overload is the spin-unpolarized
//// energy-density version (it takes a single density tensor).
//inline static void libxc_ldaeop_sp(const Key<3>& key, Tensor<double>& t) {
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  UNARY_OPTIMIZED_ITERATOR(double, t, double r=munge(2.0* *_p0); double q1; double q2; double dq;
//      xc_lda_vxc(&xc_x_func, &r, &q1, &dq); xc_lda_vxc(&xc_c_func, &r, &q2, &dq); *_p0 = q1+q2);
//}
////***************************************************************************
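
//***************************************************************************
// Note added for context: the commented-out blocks above were written
// against an early libxc interface (XC(lda_type), xc_lda_init,
// xc_lda_x_init).  Under the current libxc API the same unpolarized
// exchange+correlation potential would look roughly like the sketch below
// (left commented because "xc.h" is not included); np, rho, vx, vc and vxc
// are hypothetical buffers.
//
//   xc_func_type x_func, c_func;
//   xc_func_init(&x_func, XC_LDA_X,     XC_UNPOLARIZED);
//   xc_func_init(&c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//   xc_lda_vxc(&x_func, np, rho, vx);   // vx[i] = d e_x / d rho at rho[i]
//   xc_lda_vxc(&c_func, np, rho, vc);   // vc[i] = d e_c / d rho at rho[i]
//   for (size_t i = 0; i < np; ++i) vxc[i] = vx[i] + vc[i];
//   xc_func_end(&x_func);
//   xc_func_end(&c_func);
//***************************************************************************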

//const double THRESH_RHO = 1e-8;
//const double THRESH_GRHO = 1e-20;
//
////***************************************************************************
//inline void wst_munge_grho(int npoint, double *rho, double *grho) {
//  for (int i=0; i<npoint; i++) {
//    if (rho[i] < THRESH_RHO) rho[i] = THRESH_RHO;
//    if ((rho[i] <= THRESH_RHO) ||
//        (grho[i] < THRESH_GRHO)) grho[i] = THRESH_GRHO;
//  }
//}
////***************************************************************************
//
////***************************************************************************
//inline void wst_munge_rho(int npoint, double *rho) {
//  for (int i=0; i<npoint; i++) {
//    if (rho[i] < THRESH_RHO) rho[i] = THRESH_RHO;
//  }
//}
////***************************************************************************
//
////***************************************************************************
//inline void xc_generic_lda(Tensor<double> rho_alpha,  ///< Alpha-spin density at each grid point
//                           Tensor<double> f,          ///< Value of functional at each grid point
//                           Tensor<double> df_drho,    ///< Derivative of functional w.r.t. rho_alpha
//                           bool spinpol)
//{
//  MADNESS_ASSERT(rho_alpha.iscontiguous());
//  MADNESS_ASSERT(f.iscontiguous());
//  MADNESS_ASSERT(df_drho.iscontiguous());
//
//  rho_alpha = rho_alpha.flat();
//  f = f.flat();
//  df_drho = df_drho.flat();
//
//  XC(lda_type) xc_c_func;
//  XC(lda_type) xc_x_func;
//
//  int npt = rho_alpha.dim(0);
//
//  Tensor<double> tf(npt);
//  Tensor<double> tdf_drho(npt);
//  double* rhoptr = rho_alpha.ptr();
//  double* tfptr = tf.ptr();
//  double* tdf_drhoptr = tdf_drho.ptr();
//
//  tf.fill(0.0);
//  tdf_drho.fill(0.0);
//  f.fill(0.0);
//  df_drho.fill(0.0);
//
//  wst_munge_rho(npt, rhoptr);
//
//  xc_lda_init(&xc_c_func, XC_LDA_C_VWN, XC_UNPOLARIZED);
//  for (int i = 0; i < npt; i++)
//  {
//    xc_lda_vxc(&xc_c_func, &rhoptr[i], &tfptr[i], &tdf_drhoptr[i]);
//  }
//
//  f.gaxpy(1.0, tf, 1.0);
//  df_drho.gaxpy(1.0, tdf_drho, 1.0);
//
//  tf.fill(0.0);
//  tdf_drho.fill(0.0);
//
//  xc_lda_x_init(&xc_x_func, XC_UNPOLARIZED, 3, 0);
//  for (int i = 0; i < npt; i++)
//  {
//    xc_lda_vxc(&xc_x_func, &rhoptr[i], &tfptr[i], &tdf_drhoptr[i]);
//  }
//
//  f.gaxpy(1.0, tf, 1.0);
//  df_drho.gaxpy(1.0, tdf_drho, 1.0);
//}
////***************************************************************************
//
////***************************************************************************
//template <int NDIM>
//inline void xc_lda_V(const Key<NDIM>& key, Tensor<double>& t)
//{
//  Tensor<double> enefunc = copy(t);
//  Tensor<double> V = copy(t);
//  ::xc_generic_lda(t, enefunc, V, false);
//  t(___) = V(___);
//}
////***************************************************************************
//
////***************************************************************************
//template <int NDIM>
//inline void xc_lda_ene(const Key<NDIM>& key, Tensor<double>& t)
//{
//  Tensor<double> V = copy(t);
//  Tensor<double> enefunc = copy(t);
//  ::xc_generic_lda(t, enefunc, V, false);
//  t(___) = enefunc(___);
//}
////***************************************************************************


#endif /* LIBXC_H_ */