FOSSology  3.2.0rc1
buckets.c
1 /***************************************************************
2  Copyright (C) 2010-2014 Hewlett-Packard Development Company, L.P.
3 
4  This program is free software; you can redistribute it and/or
5  modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License along
14  with this program; if not, write to the Free Software Foundation, Inc.,
15  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 
17  ***************************************************************/
65 #include "buckets.h"
66 
68 int debug = 0;
69 
71 int DEB_SOURCE;   /* mimetype_pk of application/x-debian-source, set in main() */
73 int DEB_BINARY;   /* mimetype_pk of application/x-debian-package, set in main() */
74 
76 #ifdef COMMIT_HASH_S
77 char BuildVersion[]="buckets build version: " VERSION_S " r(" COMMIT_HASH_S ").\n";
78 #else
79 char BuildVersion[]="buckets build version: NULL.\n";
80 #endif
81 
82 /****************************************************/
83 int main(int argc, char **argv)
84 {
85  char *agentDesc = "Bucket agent";
86  int cmdopt;
87  int verbose = 0;
88  int ReadFromStdin = 1;
89  int head_uploadtree_pk = 0;
90  PGconn *pgConn;
91  PGresult *topresult;
92  PGresult *result;
93  char sqlbuf[512];
94  char *Delims = ",= \t\n\r";
95  char *token, *saveptr;
96  int agent_pk = 0;
97  int nomos_agent_pk = 0;
98  int bucketpool_pk = 0;
99  int ars_pk = 0;
100  int readnum = 0;
101  int rv;
102  int hasPrules;
103  int user_pk = 0;
104  char *bucketpool_name;
105  char *COMMIT_HASH;
106  char *VERSION;
107  char *uploadtree_tablename;
108  char agent_rev[myBUFSIZ];
109  int rerun = 0;
110 
111 
112 // int *bucketList;
113  pbucketdef_t bucketDefArray = 0;
114  pbucketdef_t tmpbucketDefArray = 0;
115  cacheroot_t cacheroot;     /* in-memory license_ref cache, initialized below */
116  uploadtree_t uploadtree;   /* uploadtree row currently being processed */
117  uploadtree.upload_fk = 0;
118 
119  /* connect to the scheduler */
120  fo_scheduler_connect(&argc, argv, &pgConn);
121  user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */
122 
123  /* command line options */
124  while ((cmdopt = getopt(argc, argv, "rin:p:t:u:vc:hV")) != -1)
125  {
126  switch (cmdopt)
127  {
128  case 'i': /* "Initialize" */
129  PQfinish(pgConn);
130  exit(0);
131  case 'n': /* bucketpool_name */
132  ReadFromStdin = 0;
133  bucketpool_name = optarg;
134  /* find the highest rev active bucketpool_pk */
135  if (!bucketpool_pk)
136  {
137  bucketpool_pk = getBucketpool_pk(pgConn, bucketpool_name);
138  if (!bucketpool_pk)
139  printf("%s is not an active bucketpool name.\n", bucketpool_name);
140  }
141  break;
142  case 'p': /* bucketpool_pk */
143  ReadFromStdin = 0;
144  bucketpool_pk = atoi(optarg);
145  /* validate bucketpool_pk */
146  sprintf(sqlbuf, "select bucketpool_pk from bucketpool where bucketpool_pk=%d and active='Y'", bucketpool_pk);
147  bucketpool_pk = validate_pk(pgConn, sqlbuf);
148  if (!bucketpool_pk)
149  printf("%d is not an active bucketpool_pk.\n", atoi(optarg));
150  break;
151  case 't': /* uploadtree_pk */
152  ReadFromStdin = 0;
153  if (uploadtree.upload_fk) break;
154  head_uploadtree_pk = atoi(optarg);
 156  /* validate uploadtree_pk */
156  sprintf(sqlbuf, "select uploadtree_pk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
157  head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
158  if (!head_uploadtree_pk)
 159  printf("%d is not a valid uploadtree_pk.\n", atoi(optarg));
160  break;
161  case 'u': /* upload_pk */
162  ReadFromStdin = 0;
163  if (!head_uploadtree_pk)
164  {
165  uploadtree.upload_fk = atoi(optarg);
166  /* validate upload_pk and get uploadtree_pk */
167  sprintf(sqlbuf, "select upload_pk from upload where upload_pk=%d", uploadtree.upload_fk);
168  uploadtree.upload_fk = validate_pk(pgConn, sqlbuf);
169  if (!uploadtree.upload_fk)
 170  printf("%d is not a valid upload_pk.\n", atoi(optarg));
171  else
172  {
173  sprintf(sqlbuf, "select uploadtree_pk from uploadtree where upload_fk=%d and parent is null", uploadtree.upload_fk);
174  head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
175  }
176  }
177  break;
178  case 'v': /* verbose output for debugging */
179  verbose++;
180  break;
181  case 'c': break; /* handled by fo_scheduler_connect() */
182  case 'r':
183  rerun = 1;
184  break;
185  case 'V': /* print version info */
186  printf("%s", BuildVersion);
187  PQfinish(pgConn);
188  exit(0);
189  default:
190  Usage(argv[0]);
191  PQfinish(pgConn);
192  exit(-1);
193  }
194  }
195  debug = verbose;
196 
197  /*** validate command line ***/
198  if (!bucketpool_pk && !ReadFromStdin)
199  {
200  printf("FATAL: You must specify an active bucketpool.\n");
201  Usage(argv[0]);
202  exit(-1);
203  }
204  if (!head_uploadtree_pk && !ReadFromStdin)
205  {
206  printf("FATAL: You must specify a valid uploadtree_pk or upload_pk.\n");
207  Usage(argv[0]);
208  exit(-1);
209  }
210 
211  /* get agent pk
212  * Note, if GetAgentKey fails, this process will exit.
213  */
214  COMMIT_HASH = fo_sysconfig("buckets", "COMMIT_HASH");
215  VERSION = fo_sysconfig("buckets", "VERSION");
216  sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
217  agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), uploadtree.upload_fk, agent_rev, agentDesc);
218 
219  /*** Initialize the license_ref table cache ***/
 220  /* Build the license ref cache to hold 2<<11 (4096) license entries.
221  This MUST be a power of 2.
222  */
223  cacheroot.maxnodes = 2<<11;
224  cacheroot.nodes = calloc(cacheroot.maxnodes, sizeof(cachenode_t));
225  if (!lrcache_init(pgConn, &cacheroot))
226  {
227  printf("FATAL: Bucket agent could not allocate license_ref table cache.\n");
228  exit(1);
229  }
230 
231 
232  /* main processing loop */
233  while(++readnum)
234  {
235  uploadtree.upload_fk = 0;
236  if (ReadFromStdin)
237  {
238  bucketpool_pk = 0;
239 
240  /* Read the bucketpool_pk and upload_pk from stdin.
241  * Format looks like 'bppk=123, upk=987'
242  */
243  if (!fo_scheduler_next()) break;
244 
245  token = strtok_r(fo_scheduler_current(), Delims, &saveptr);
246  while (token && (!uploadtree.upload_fk || !bucketpool_pk))
247  {
248  if (strcmp(token, "bppk") == 0)
249  {
250  bucketpool_pk = atoi(strtok_r(NULL, Delims, &saveptr));
251  }
252  else
253  if (strcmp(token, "upk") == 0)
254  {
255  uploadtree.upload_fk = atoi(strtok_r(NULL, Delims, &saveptr));
256  }
257  token = strtok_r(NULL, Delims, &saveptr);
258  }
259 
260  /* Check Permissions */
261  if (GetUploadPerm(pgConn, uploadtree.upload_fk, user_pk) < PERM_WRITE)
262  {
 263  LOG_ERROR("You do not have update permission on upload %d", uploadtree.upload_fk);
264  continue;
265  }
266 
267  /* From the upload_pk, get the head of the uploadtree, pfile_pk and ufile_name */
268  sprintf(sqlbuf, "select uploadtree_pk, pfile_fk, ufile_name, ufile_mode,lft,rgt from uploadtree \
269  where upload_fk='%d' and parent is null limit 1", uploadtree.upload_fk);
270  topresult = PQexec(pgConn, sqlbuf);
271  if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) return -1;
272  if (PQntuples(topresult) == 0)
273  {
274  printf("ERROR: %s.%s missing upload_pk %d.\nsql: %s",
275  __FILE__, agentDesc, uploadtree.upload_fk, sqlbuf);
276  PQclear(topresult);
277  continue;
278  }
279  head_uploadtree_pk = atol(PQgetvalue(topresult, 0, 0));
280  uploadtree.uploadtree_pk = head_uploadtree_pk;
 281  /* upload_fk was already set from the scheduler request parsed above */
282  uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 1));
283  uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 2));
284  uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 3));
285  uploadtree.lft = atoi(PQgetvalue(topresult, 0, 4));
286  uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 5));
287  PQclear(topresult);
288  } /* end ReadFromStdin */
289  else
290  {
 291  /* Only one input to process when run from the command line, so terminate once it has been done */
292  if (readnum > 1) break;
293 
294  /* not reading from stdin
 295  * Get the pfile and ufile_name for head_uploadtree_pk
296  */
297  sprintf(sqlbuf, "select pfile_fk, ufile_name, ufile_mode,lft,rgt, upload_fk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
298  topresult = PQexec(pgConn, sqlbuf);
299  if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__))
300  {
301  free(uploadtree.ufile_name);
302  return -1;
303  }
304  if (PQntuples(topresult) == 0)
305  {
306  printf("FATAL: %s.%s missing root uploadtree_pk %d\n",
307  __FILE__, agentDesc, head_uploadtree_pk);
308  PQclear(topresult);
309  continue;
310  }
311  uploadtree.uploadtree_pk = head_uploadtree_pk;
312  uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 0));
313  uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 1));
314  uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 2));
315  uploadtree.lft = atoi(PQgetvalue(topresult, 0, 3));
316  uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 4));
317  uploadtree.upload_fk = atoi(PQgetvalue(topresult, 0, 5));
318  PQclear(topresult);
319  }
320 
321  /* Find the most recent nomos data for this upload. That's what we want to use
322  to process the buckets.
323  */
324  nomos_agent_pk = LatestNomosAgent(pgConn, uploadtree.upload_fk);
325  if (nomos_agent_pk == 0)
326  {
327  printf("WARNING: Bucket agent called on treeitem (%d), but the latest nomos agent hasn't created any license data for this tree.\n",
328  head_uploadtree_pk);
329  continue;
330  }
331 
332  /* at this point we know:
333  * bucketpool_pk, bucket agent_pk, nomos agent_pk, upload_pk,
334  * pfile_pk, and head_uploadtree_pk (the uploadtree_pk of the head tree to scan)
335  */
336 
337  /* Has the upload already been processed? If so, we are done.
338  Don't even bother to create a bucket_ars entry.
339  */
340  switch (UploadProcessed(pgConn, agent_pk, nomos_agent_pk, uploadtree.pfile_fk, head_uploadtree_pk, uploadtree.upload_fk, bucketpool_pk))
341  {
342  case 1: /* upload has already been processed */
343  if (1 == rerun) break;
344  printf("LOG: Duplicate request for bucket agent to process upload_pk: %d, uploadtree_pk: %d, bucketpool_pk: %d, bucket agent_pk: %d, nomos agent_pk: %d, pfile_pk: %d ignored.\n",
345  uploadtree.upload_fk, head_uploadtree_pk, bucketpool_pk, agent_pk, nomos_agent_pk, uploadtree.pfile_fk);
346  continue;
347  case -1: /* SQL error, UploadProcessed() wrote error message */
348  continue;
349  case 0: /* upload has not been processed */
350  break;
351  }
352 
353  /*** Initialize the Bucket Definition List bucketDefArray ***/
354  bucketDefArray = initBuckets(pgConn, bucketpool_pk, &cacheroot);
355  if (bucketDefArray == 0)
356  {
357  printf("FATAL: %s.%d Bucket definition for pool %d could not be initialized.\n",
358  __FILE__, __LINE__, bucketpool_pk);
359  exit(-2);
360  }
361  bucketDefArray->nomos_agent_pk = nomos_agent_pk;
362  bucketDefArray->bucket_agent_pk = agent_pk;
363 
364  /* Find the correct uploadtree table name */
365  uploadtree_tablename = GetUploadtreeTableName(pgConn, uploadtree.upload_fk);
366  if (!(uploadtree_tablename))
367  {
368  LOG_FATAL("buckets passed invalid upload, upload_pk = %d", uploadtree.upload_fk);
369  return(-110);
370  }
371 
372  /* set uploadtree_tablename in all the bucket definition structs */
373  for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
374  {
375  tmpbucketDefArray->uploadtree_tablename = uploadtree_tablename;
376  }
377 
378  /* loop through rules (bucket defs) to see if there are any package only rules */
379  hasPrules = 0;
380  for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
381  if (tmpbucketDefArray->applies_to == 'p')
382  {
383  hasPrules = 1;
384  break;
385  }
386 
387  /*** END initializing bucketDefArray ***/
388 
389  /*** Initialize DEB_SOURCE and DEB_BINARY ***/
390  sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-package'");
391  result = PQexec(pgConn, sqlbuf);
392  if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
393  if (PQntuples(result) == 0)
394  {
395  printf("FATAL: (%s.%d) Missing application/x-debian-package mimetype.\n",__FILE__,__LINE__);
396  return -1;
397  }
398  DEB_BINARY = atoi(PQgetvalue(result, 0, 0));
399  PQclear(result);
400 
401  sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-source'");
402  result = PQexec(pgConn, sqlbuf);
403  if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
404  if (PQntuples(result) == 0)
405  {
406  printf("FATAL: (%s.%d) Missing application/x-debian-source mimetype.\n",__FILE__,__LINE__);
407  return -1;
408  }
409  DEB_SOURCE = atoi(PQgetvalue(result, 0, 0));
410  PQclear(result);
411  /*** END Initialize DEB_SOURCE and DEB_BINARY ***/
412 
413  /*** Record analysis start in bucket_ars, the bucket audit trail. ***/
 414  if (0 == rerun) { // no bucket scan has been recorded for this upload yet
415  snprintf(sqlbuf, sizeof(sqlbuf),
416  "insert into bucket_ars (agent_fk, upload_fk, ars_success, nomosagent_fk, bucketpool_fk) values(%d,%d,'%s',%d,%d)",
417  agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
418  if (debug)
419  printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);
420 
421  result = PQexec(pgConn, sqlbuf);
422  if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
423  PQclear(result);
424 
425  /* retrieve the ars_pk of the newly inserted record */
426  sprintf(sqlbuf, "select ars_pk from bucket_ars where agent_fk='%d' and upload_fk='%d' and ars_success='%s' and nomosagent_fk='%d' \
427  and bucketpool_fk='%d' and ars_endtime is null \
428  order by ars_starttime desc limit 1",
429  agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
430  result = PQexec(pgConn, sqlbuf);
431  if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
432  if (PQntuples(result) == 0)
433  {
434  printf("FATAL: (%s.%d) Missing bucket_ars record.\n%s\n",__FILE__,__LINE__,sqlbuf);
435  return -1;
436  }
437  ars_pk = atol(PQgetvalue(result, 0, 0));
438  PQclear(result);
439  }
440  /*** END bucket_ars insert ***/
441 
442  if (debug) printf("%s sql: %s\n",__FILE__, sqlbuf);
443 
444  /* process the tree for buckets
 445  Do this as a single transaction; therefore this agent must be
446  run as a single thread. This will prevent the scheduler from
447  consuming excess time (this is a fast agent), and allow this
448  process to update bucket_ars.
449  */
450  rv = walkTree(pgConn, bucketDefArray, agent_pk, head_uploadtree_pk, 0,
451  hasPrules);
452  /* if no errors and top level is a container, process the container */
453  if ((!rv) && (IsContainer(uploadtree.ufile_mode)))
454  {
455  rv = processFile(pgConn, bucketDefArray, &uploadtree, agent_pk, hasPrules);
456  }
457 
458  /* Record analysis end in bucket_ars, the bucket audit trail. */
459  if (0 == rerun && ars_pk)
460  {
461  if (rv)
462  snprintf(sqlbuf, sizeof(sqlbuf),
463  "update bucket_ars set ars_endtime=now(), ars_success=false where ars_pk='%d'",
464  ars_pk);
465  else
466  snprintf(sqlbuf, sizeof(sqlbuf),
467  "update bucket_ars set ars_endtime=now(), ars_success=true where ars_pk='%d'",
468  ars_pk);
469 
470  if (debug)
471  printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);
472 
473  result = PQexec(pgConn, sqlbuf);
474  if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
475  PQclear(result);
476  }
477  } /* end of main processing loop */
478 
479  lrcache_free(&cacheroot);
480  free(bucketDefArray);
481 
482  PQfinish(pgConn);
 483  fo_scheduler_disconnect(0);
 484  return (0);
485 }
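
The getopt string in main() ("rin:p:t:u:vc:hV") defines the agent's command-line interface. A rough sketch of direct (non-scheduler) invocation, using placeholder keys rather than real database values (the binary is shown here simply as buckets; the installed path depends on the FOSSology setup):

  buckets -p <bucketpool_pk> -u <upload_pk> -v
  buckets -n <bucketpool_name> -t <uploadtree_pk>

When driven by the scheduler, the agent instead reads requests from stdin in the form 'bppk=<bucketpool_pk>, upk=<upload_pk>', as parsed in the main loop above. -V prints BuildVersion, and -i exits immediately after the scheduler and database connection is established.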