ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/ns_dev/Python/NinoCode/Tool_Box/EclipseSQLLib.py
Revision: 650
Committed: Thu Dec 12 20:43:50 2019 UTC (6 years, 3 months ago) by nino.borges
Content type: text/x-python
File size: 18326 byte(s)
Log Message:
A simple class library for automating Eclipse through SQL

File Contents

# User Rev Content
1 nino.borges 650 """
2    
3     EclipseSQLLib
4    
5     Created by
6     Emanuel Borges
7     05.30.2018
8    
9     A proper class for automating Eclipse through SQL.
10    
11     """
12    
13    
14     import pyodbc
15     import SQLBlobs
16    
17    
class Eclipse_Connection(object):
    """An Advanced Eclipse Connection Library and Toolkit"""
    # Library version string, reported in the startup banner printed below.
    version = '0.6.0'

    def __init__(self):
        # caseMatrix: case display name -> internal database name, filled by
        # _UpdateCaseMatrix(); caseList is its sorted key list.
        self.caseMatrix = None
        self.caseList = None
        # Per-case field metadata (field name -> (table, column)), filled by
        # SelectCase(); fieldList is its sorted key list.
        self.fieldMatrix = None
        self.fieldList = None
        # Display name of the currently selected case (a key of caseMatrix).
        self.currentDatabaseName = None
        # Review-pass (batch set) names of the current case, or None.
        self.batchSetList = None
        # Tag lookup tables for the current case, filled by
        # _UpdateTagGroupNameMatrixes().
        self.tagGroupMatrix = None
        self.tagNameMatrix = None
        self.tagGroupToTagNameMatrix = None
        # Connects to the AddConfig server immediately to discover cases.
        self._UpdateCaseMatrix()
        print "\nEclipse SQL Library Initialized.\n\nCreated by Emanuel Borges\nVersion %s\n\n"% self.version
35     def _UpdateCaseMatrix(self):
36     """Will normally run only once when initiated, which populates the case Matrix and the sorted case list."""
37     matrix = {}
38    
39     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=AddConfig;""Trusted_Connection=yes;")
40     cursor = cnxn.cursor()
41    
42     cursor.execute('select name,databasename from Enterprise.CaseProductEnvironment where ProductId=3')
43    
44     for row in cursor:
45     matrix[row.name] = row.databasename
46    
47    
48     self.caseMatrix = matrix
49     self.caseList = self.caseMatrix.keys()
50     self.caseList.sort()
51     cnxn.close()
52    
53     def _UpdateBatchSetList(self):
54     """Will update the list of batch set names that exist in a db"""
55     batchSetList = []
56     internalCaseName = self.caseMatrix[self.currentDatabaseName]
57    
58     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
59     cursor = cnxn.cursor()
60    
61     cursor.execute('SELECT ReviewPassName FROM Review.ReviewPass ORDER BY ReviewPassName')
62    
63     for row in cursor:
64     batchSetList.append(row.ReviewPassName)
65    
66     if batchSetList:
67     batchSetList.sort()
68     self.batchSetList = batchSetList
69     else:
70     self.batchSetList = None
71    
72     cnxn.close()
73    
74    
75     def _UpdateTagGroupNameMatrixes(self):
76     """Updates three tag matrixes, tagGroup with id, tagNames with id and the tagGroup to tagName"""
77     tagGroupMatrix = {}
78     tagNameMatrix = {}
79     tagGroupToTagNameMatrix = {}
80    
81    
82     internalCaseName = self.caseMatrix[self.currentDatabaseName]
83    
84     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
85     cursor = cnxn.cursor()
86    
87     cursor.execute("SELECT TagGroups.TagGroupName, Tags.TagName, TagGroupsDetail.TagGroupId, TagGroupsDetail.TagId FROM TagGroups INNER JOIN TagGroupsDetail ON TagGroups.TagGroupId = TagGroupsDetail.TagGroupId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId")
88    
89     for row in cursor:
90     tagGroupMatrix[row.TagGroupName] = row.TagGroupId
91     tagNameMatrix[row.TagName] = row.TagId
92     if row.TagGroupName in tagGroupToTagNameMatrix.keys():
93     tagGroupToTagNameMatrix[row.TagGroupName].append(row.TagName)
94     else:
95     tagGroupToTagNameMatrix[row.TagGroupName] = [row.TagName,]
96    
97     cnxn.close()
98    
99     self.tagGroupMatrix = tagGroupMatrix
100     self.tagNameMatrix = tagNameMatrix
101     self.tagGroupToTagNameMatrix = tagGroupToTagNameMatrix
102    
103    
104     def _QueryByTag(self, tagName):
105     """Performs a query by tag and returns a connection and cursor object. Dont forget to close both! For now this is private"""
106    
107     internalCaseName = self.caseMatrix[self.currentDatabaseName]
108    
109     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
110     cursor = cnxn.cursor()
111    
112     cursor.execute("SELECT Tags.TagName, Documents.DocumentKey FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId INNER JOIN DocumentTags ON Tags.TagId = DocumentTags.TagId INNER JOIN Documents ON DocumentTags.DocId = Documents.DocId WHERE (Tags.TagName = '%s') ORDER BY DocumentKey"% tagName)
113    
114     return cnxn, cursor
115    
116    
117     def _ProcessSQLBlob(self, blobName, cursor):
118     """Internal method only. Will take a blobname, find it in the SQLBlob class and process it. There MUST be a GO a the end for this to work."""
119     sqlQuery = ""
120     rawBlob = SQLBlobs.BlobListMatrix.matrix[blobName]
121    
122     for i in rawBlob:
123     if i == 'GO\n':
124     cursor.execute(sqlQuery)
125     sqlQuery = ""
126     #elif 'PRINT' in i:
127     # display = i.split("'")[1]
128     # print(display, '\r')
129     else:
130     sqlQuery = sqlQuery + i
131    
132    
133     def SelectCase(self, caseName):
134     """Sets the current database name and updates the fieldMatrix and the sorted field list and updates the batchsetlist and the tagMatrixes too"""
135     self.currentDatabaseName = caseName
136    
137     matrix = {}
138    
139     internalCaseName = self.caseMatrix[caseName]
140    
141     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
142     cursor = cnxn.cursor()
143    
144     cursor.execute('SELECT TableName, ColumnName, FieldName FROM vFieldDefinition')
145    
146     for row in cursor:
147     matrix[row.FieldName] = (row.TableName,row.ColumnName)
148    
149     self.fieldMatrix = matrix
150     self.fieldList = self.fieldMatrix.keys()
151     self.fieldList.sort()
152    
153     cnxn.close()
154     self._UpdateBatchSetList()
155     self._UpdateTagGroupNameMatrixes()
156    
157    
158     def GetUniqueValues(self, fieldName):
159     """Returns a list of unique values in field."""
160     uniqueValueList = []
161    
162     internalCaseName = self.caseMatrix[self.currentDatabaseName]
163    
164     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
165     cursor = cnxn.cursor()
166    
167     internalTableName, internalFieldName = self.fieldMatrix[fieldName]
168    
169     cursor.execute('Select DISTINCT %s FROM %s'% (internalFieldName, internalTableName))
170    
171     for row in cursor:
172     #print row.Field000008
173     uniqueValueList.append(getattr(row,internalFieldName))
174    
175     uniqueValueList.sort()
176     cnxn.close()
177    
178     return uniqueValueList
179    
180     def GetAnalyticsCategorySetsValues(self):
181     """Returns a matrix of the values that a case has in all of the cat sets"""
182     matrix = {}
183    
184     internalCaseName = self.caseMatrix[self.currentDatabaseName]
185    
186     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
187     cursor = cnxn.cursor()
188    
189     cursor.execute('Select top 10000 b.CategoryName, c.DocumentKey, a.IsEntireDocument, a.DocumentText FROM Analytics.CategoryExemplarDocument a left join [Analytics].[Category] b on b.CategoryId=a.CategoryId left join dbo.documents c on c.docid=a.docid order by 2 asc')
190    
191     for row in cursor:
192     if row.IsEntireDocument:
193     docText = "FULL DOCUMENT"
194     else:
195     docText = row.DocumentText
196     if row.DocumentKey:
197     #docKey = "test"
198     docKey = row.DocumentKey
199     else:
200     docKey = "No Document"
201     if row.CategoryName in matrix.keys():
202     matrix[row.CategoryName].append((docKey,docText))
203     else:
204     matrix[row.CategoryName] = [(docKey,docText),]
205    
206     cnxn.close()
207    
208     return matrix
209    
210    
211     def GetProductionSize(self, prodVolFieldName, natFileSizeFieldName, volumeValue):
212     """Returns the total production size, in GB, for a production with the volume of the voumeValue you give it. It rounds the number and, if you are under .01 it will just make it .01. Finally also returns 0.0 if the value was empty."""
213     internalCaseName = self.caseMatrix[self.currentDatabaseName]
214    
215     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
216     cursor = cnxn.cursor()
217    
218     internalProdVolTableName, internalProdVolFieldName = self.fieldMatrix[prodVolFieldName]
219     internalNatFileSizeTableName, internalNatFileSizeFieldName = self.fieldMatrix[natFileSizeFieldName]
220    
221     if internalProdVolTableName == internalNatFileSizeTableName:
222     ## No join needed
223     cursor.execute("Select %s, %s FROM %s WHERE %s LIKE '%s'"% (internalProdVolFieldName, internalNatFileSizeFieldName, internalProdVolTableName, internalProdVolFieldName,volumeValue))
224     else:
225     cursor.execute("Select %s.%s, %s.%s FROM %s INNER JOIN %s ON %s.DocId = %s.DocId WHERE %s LIKE '%s'"% (internalProdVolTableName,internalProdVolFieldName,internalNatFileSizeTableName, internalNatFileSizeFieldName, internalProdVolTableName,internalNatFileSizeTableName,internalProdVolTableName,internalNatFileSizeTableName,internalProdVolFieldName,volumeValue))
226    
227     count = 0
228    
229     for row in cursor:
230     docSize = getattr(row,internalNatFileSizeFieldName)
231     if docSize:
232     count = count + docSize
233    
234    
235     count2 = count / 1024
236     count3 = count2 / 1024
237     cnxn.close()
238     finalCount = round(count3,2)
239     if count3:
240     if finalCount < 0.01:
241     finalCount = 0.01
242     print count3,finalCount
243     return finalCount
244    
245    
246     def UpdateEndDocByTag(self, tagName, endDocField):
247     """Updates the endDocField, filtering by a tag, but only the null value ones, by copying from the evidoxID field"""
248     internalCaseName = self.caseMatrix[self.currentDatabaseName]
249    
250     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
251     cursor = cnxn.cursor()
252    
253     cursor.execute("SELECT Tags.TagName, Documents.DocumentKey FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId INNER JOIN DocumentTags ON Tags.TagId = DocumentTags.TagId INNER JOIN Documents ON DocumentTags.DocId = Documents.DocId WHERE (Tags.TagName = '%s') ORDER BY DocumentKey"% tagName)
254    
255    
256    
257     def UpdateBatchReviewedStatus(self, batchSetName, queryOnly = True):
258     """Updates the batch reviewed status flag for a particular batch set by name. default is to query only, which wont actually do any updates and only return the count"""
259     internalCaseName = self.caseMatrix[self.currentDatabaseName]
260    
261     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
262     cursor = cnxn.cursor()
263    
264    
265     if queryOnly:
266     print "running test only"
267     cursor.execute("SELECT Review.BatchDocument.ReviewStatusId, Tags.TagName, Review.ReviewPass.ReviewPassName FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Review.BatchDocument INNER JOIN DocumentTags ON Review.BatchDocument.DocId = DocumentTags.DocId INNER JOIN DocumentFields_0002 ON Review.BatchDocument.DocId = DocumentFields_0002.DocId ON TagGroupsDetail.TagId = DocumentTags.TagId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId AND DocumentTags.TagId = Tags.TagId INNER JOIN Review.Batch ON Review.BatchDocument.BatchId = Review.Batch.BatchId AND Review.BatchDocument.BatchId = Review.Batch.BatchId INNER JOIN Review.ReviewPass ON Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId WHERE (Review.BatchDocument.ReviewStatusId = '0') AND (TagGroups.TagGroupName = N'Doc Review Designation') AND (Tags.TagName IS NOT NULL) AND (Review.ReviewPass.ReviewPassName = N'%s')"% batchSetName)
268     toBeUpdatedCount = len(cursor.fetchall())
269     else:
270     print "performing the update"
271     cursor.execute("UPDATE Review.BatchDocument SET ReviewStatusId = '1' FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Review.BatchDocument INNER JOIN DocumentTags ON Review.BatchDocument.DocId = DocumentTags.DocId INNER JOIN DocumentFields_0002 ON Review.BatchDocument.DocId = DocumentFields_0002.DocId ON TagGroupsDetail.TagId = DocumentTags.TagId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId AND DocumentTags.TagId = Tags.TagId INNER JOIN Review.Batch ON Review.BatchDocument.BatchId = Review.Batch.BatchId AND Review.BatchDocument.BatchId = Review.Batch.BatchId INNER JOIN Review.ReviewPass ON Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId WHERE (Review.BatchDocument.ReviewStatusId = '0') AND (TagGroups.TagGroupName = N'Doc Review Designation') AND (Tags.TagName IS NOT NULL) AND (Review.ReviewPass.ReviewPassName = N'%s')"% batchSetName)
272     toBeUpdatedCount = cursor.rowcount
273     cnxn.commit()
274    
275     cnxn.close()
276    
277     return toBeUpdatedCount
278    
279     def GenerateSavedSearchesReport (self, pipeFile):
280     """Generates a pipe delimited report from the saved searches"""
281     internalCaseName = self.caseMatrix[self.currentDatabaseName]
282    
283     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
284     cursor = cnxn.cursor()
285    
286     ## First grab the parent folder matrix and prepopulate the -1 and 0 folders
287     folderMatrix = {}
288     folderMatrix[0] = ['<RootFolder>',0,0,'Nobody']
289     folderMatrix[-1] = ['<PrivateFolder>',0,0,'Nobody']
290    
291     cursor.execute('SELECT SavedSearch.Folders.* FROM SavedSearch.Folders')
292     for row in cursor:
293     folderMatrix[getattr(row,'FolderId')] = [getattr(row,'FolderName'),getattr(row,'ParentFolderId'),getattr(row,'CreatedByUserName')]
294    
295    
296     ## Now create the pipedelim file
297     outputFile = open(pipeFile,'w')
298     #outputFile.write(foo.encode('utf8'))
299     outputFile.write(u'UserName|ParentFolderId|SearchId|SearchName|CreatedDate|ModifiedDate|SearchItems\n'.encode('utf8'))
300    
301     cursor.execute('SELECT ActivityTracking.DIM_User.UserName,SavedSearch.Searches.ParentFolderId,SavedSearch.Searches.SearchId, SavedSearch.Searches.SearchName, SavedSearch.Searches.CreatedDate,SavedSearch.Searches.ModifiedDate, SavedSearch.Searches.SearchItems FROM ActivityTracking.DIM_User INNER JOIN SavedSearch.Searches ON ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.CreatedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.ModifiedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.CreatedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.ModifiedByKey')
302    
303     for row in cursor:
304     #outputFile.write("%s|%s|%s|%s|%s|%s|%s\n"% (getattr(row,'UserName'),folderMatrix[getattr(row,'ParentFolderId')][0],getattr(row,'SearchId'),getattr(row,'SearchName'),getattr(row,'CreatedDate'),getattr(row,'ModifiedDate',getattr(row,'SearchItems')))
305     outputFile.write(u"{0}|{1}|{2}|{3}|{4}|{5}|{6}\n".format(getattr(row,u'UserName'),folderMatrix[getattr(row,u'ParentFolderId')][0],getattr(row,u'SearchId'),getattr(row,u'SearchName'),getattr(row,u'CreatedDate'),getattr(row,u'ModifiedDate'),getattr(row,u'SearchItems')).encode('utf8'))
306    
307     outputFile.close()
308     cnxn.close()
309    
310     #return folderMatrix
311    
312    
313    
314     def RunDatabaseDefrag (self, viewOnly = True):
315     """Runs a database defrag on the current database. default is viewonly, which wont actually run it. overide by setting to false to run"""
316     internalCaseName = self.caseMatrix[self.currentDatabaseName]
317    
318     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
319     cursor = cnxn.cursor()
320    
321     ## with the db selected, now execute the sp_updatestats part
322     print "\nPerforming SP_Updatestats..."
323     cursor.execute("sp_updatestats")
324     cnxn.close()
325     print "SP_UpdateStats finished."
326    
327     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=evdxAdmin;""Trusted_Connection=yes;")
328     cursor = cnxn.cursor()
329    
330    
331     ## with the eevdxadmin db selected, now run the ap_rebuildindexes part
332     print "\nPerforming ap_rebuildIndexes..."
333     cursor.execute("exec ap_rebuildindexes @databasename = '%s'"%internalCaseName)
334     print "rebuildIndexes finished"
335    
336     cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName)
337     cursor = cnxn.cursor()
338    
339     ## Now read and run the rest by way of a blob.
340     print "\nNow performing full defrag..."
341     self._ProcessSQLBlob('defragBlob',cursor)
342    
343     cnxn.close()
344     print "Full Defrag Finished."
345    
346