uawdijnntqw1x1x1
IP : 216.73.216.153
Hostname : 6.87.74.97.host.secureserver.net
Kernel : Linux 6.87.74.97.host.secureserver.net 4.18.0-553.83.1.el8_10.x86_64 #1 SMP Mon Nov 10 04:22:44 EST 2025 x86_64
Disable Function : None :)
OS : Linux
PATH:
/
home
/
emeraadmin
/
www
/
4d695
/
..
/
Service
/
..
/
4d695
/
plugins.tar
/
/
grid.setcolumns.js000064400000012611151677265110010236 0ustar00;(function($){ /** * jqGrid extension for manipulating columns properties * Piotr Roznicki roznicki@o2.pl * http://www.roznicki.prv.pl * Dual licensed under the MIT and GPL licenses: * http://www.opensource.org/licenses/mit-license.php * http://www.gnu.org/licenses/gpl-2.0.html **/ $.jgrid.extend({ setColumns : function(p) { p = $.extend({ top : 0, left: 0, width: 200, height: 'auto', dataheight: 'auto', modal: false, drag: true, beforeShowForm: null, afterShowForm: null, afterSubmitForm: null, closeOnEscape : true, ShrinkToFit : false, jqModal : false, saveicon: [true,"left","ui-icon-disk"], closeicon: [true,"left","ui-icon-close"], onClose : null, colnameview : true, closeAfterSubmit : true, updateAfterCheck : false, recreateForm : false }, $.jgrid.col, p ||{}); return this.each(function(){ var $t = this; if (!$t.grid ) { return; } var onBeforeShow = typeof p.beforeShowForm === 'function' ? true: false; var onAfterShow = typeof p.afterShowForm === 'function' ? true: false; var onAfterSubmit = typeof p.afterSubmitForm === 'function' ? true: false; var gID = $t.p.id, dtbl = "ColTbl_"+gID, IDs = {themodal:'colmod'+gID,modalhead:'colhd'+gID,modalcontent:'colcnt'+gID, scrollelm: dtbl}; if(p.recreateForm===true && $("#"+IDs.themodal).html() != null) { $("#"+IDs.themodal).remove(); } if ( $("#"+IDs.themodal).html() != null ) { if(onBeforeShow) { p.beforeShowForm($("#"+dtbl)); } $.jgrid.viewModal("#"+IDs.themodal,{gbox:"#gbox_"+gID,jqm:p.jqModal, jqM:false, modal:p.modal}); if(onAfterShow) { p.afterShowForm($("#"+dtbl)); } } else { var dh = isNaN(p.dataheight) ? p.dataheight : p.dataheight+"px"; var formdata = "<div id='"+dtbl+"' class='formdata' style='width:100%;overflow:auto;position:relative;height:"+dh+";'>"; formdata += "<table class='ColTable' cellspacing='1' cellpading='2' border='0'><tbody>"; for(i=0;i<this.p.colNames.length;i++){ if(!$t.p.colModel[i].hidedlg) { // added from T. 
Tomov formdata += "<tr><td style='white-space: pre;'><input type='checkbox' style='margin-right:5px;' id='col_" + this.p.colModel[i].name + "' class='cbox' value='T' " + ((this.p.colModel[i].hidden===false)?"checked":"") + "/>" + "<label for='col_" + this.p.colModel[i].name + "'>" + this.p.colNames[i] + ((p.colnameview) ? " (" + this.p.colModel[i].name + ")" : "" )+ "</label></td></tr>"; } } formdata += "</tbody></table></div>" var bS = !p.updateAfterCheck ? "<a href='javascript:void(0)' id='dData' class='fm-button ui-state-default ui-corner-all'>"+p.bSubmit+"</a>" : "", bC ="<a href='javascript:void(0)' id='eData' class='fm-button ui-state-default ui-corner-all'>"+p.bCancel+"</a>"; formdata += "<table border='0' class='EditTable' id='"+dtbl+"_2'><tbody><tr style='display:block;height:3px;'><td></td></tr><tr><td class='DataTD ui-widget-content'></td></tr><tr><td class='ColButton EditButton'>"+bS+" "+bC+"</td></tr></tbody></table>"; p.gbox = "#gbox_"+gID; $.jgrid.createModal(IDs,formdata,p,"#gview_"+$t.p.id,$("#gview_"+$t.p.id)[0]); if(p.saveicon[0]==true) { $("#dData","#"+dtbl+"_2").addClass(p.saveicon[1] == "right" ? 'fm-button-icon-right' : 'fm-button-icon-left') .append("<span class='ui-icon "+p.saveicon[2]+"'></span>"); } if(p.closeicon[0]==true) { $("#eData","#"+dtbl+"_2").addClass(p.closeicon[1] == "right" ? 'fm-button-icon-right' : 'fm-button-icon-left') .append("<span class='ui-icon "+p.closeicon[2]+"'></span>"); } if(!p.updateAfterCheck) { $("#dData","#"+dtbl+"_2").click(function(e){ for(i=0;i<$t.p.colModel.length;i++){ if(!$t.p.colModel[i].hidedlg) { // added from T. Tomov var nm = $t.p.colModel[i].name.replace(/\./g, "\\."); if($("#col_" + nm,"#"+dtbl).attr("checked")) { $($t).jqGrid("showCol",$t.p.colModel[i].name); $("#col_" + nm,"#"+dtbl).attr("defaultChecked",true); // Added from T. Tomov IE BUG } else { $($t).jqGrid("hideCol",$t.p.colModel[i].name); $("#col_" + nm,"#"+dtbl).attr("defaultChecked",""); // Added from T. 
Tomov IE BUG } } } if(p.ShrinkToFit===true) { $($t).jqGrid("setGridWidth",$t.grid.width-0.001,true); } if(p.closeAfterSubmit) $.jgrid.hideModal("#"+IDs.themodal,{gb:"#gbox_"+gID,jqm:p.jqModal, onClose: p.onClose}); if (onAfterSubmit) { p.afterSubmitForm($("#"+dtbl)); } return false; }); } else { $(":input","#"+dtbl).click(function(e){ var cn = this.id.substr(4); if(cn){ if(this.checked) { $($t).jqGrid("showCol",cn); } else { $($t).jqGrid("hideCol",cn); } if(p.ShrinkToFit===true) { $($t).jqGrid("setGridWidth",$t.grid.width-0.001,true); } } return this; }); } $("#eData", "#"+dtbl+"_2").click(function(e){ $.jgrid.hideModal("#"+IDs.themodal,{gb:"#gbox_"+gID,jqm:p.jqModal, onClose: p.onClose}); return false; }); $("#dData, #eData","#"+dtbl+"_2").hover( function(){$(this).addClass('ui-state-hover');}, function(){$(this).removeClass('ui-state-hover');} ); if(onBeforeShow) { p.beforeShowForm($("#"+dtbl)); } $.jgrid.viewModal("#"+IDs.themodal,{gbox:"#gbox_"+gID,jqm:p.jqModal, jqM: true, modal:p.modal}); if(onAfterShow) { p.afterShowForm($("#"+dtbl)); } } }); } }); })(jQuery);grid.addons.js000064400000062222151677265110007315 0ustar00(function($){ /* * jqGrid methods without support. Use as you wish * Tony Tomov tony@trirand.com * http://trirand.com/blog/ * Dual licensed under the MIT and GPL licenses: * http://www.opensource.org/licenses/mit-license.php * http://www.gnu.org/licenses/gpl-2.0.html * * This list of deprecated methods. * If you instead want to use them, please include this file after the grid main file. * Some methods will be then overwritten. * */ /*global jQuery, $ */ $.jgrid.extend({ // This is the ols search Filter method used in navigator. searchGrid : function (p) { p = $.extend({ recreateFilter: false, drag: true, sField:'searchField', sValue:'searchString', sOper: 'searchOper', sFilter: 'filters', loadDefaults: true, // this options activates loading of default filters from grid's postData for Multipe Search only. 
beforeShowSearch: null, afterShowSearch : null, onInitializeSearch: null, closeAfterSearch : false, closeAfterReset: false, closeOnEscape : false, multipleSearch : false, cloneSearchRowOnAdd: true, // translation // if you want to change or remove the order change it in sopt // ['bw','eq','ne','lt','le','gt','ge','ew','cn'] sopt: null, // Note: stringResult is intentionally declared "undefined by default". // you are velcome to define stringResult expressly in the options you pass to searchGrid() // stringResult is a "safeguard" measure to insure we post sensible data when communicated as form-encoded // see http://github.com/tonytomov/jqGrid/issues/#issue/36 // // If this value is not expressly defined in the incoming options, // lower in the code we will infer the value based on value of multipleSearch stringResult: undefined, onClose : null, // useDataProxy allows ADD, EDIT and DEL code to bypass calling $.ajax // directly when grid's 'dataProxy' property (grid.p.dataProxy) is a function. // Used for "editGridRow" and "delGridRow" below and automatically flipped to TRUE // when ajax setting's 'url' (grid's 'editurl') property is undefined. // When 'useDataProxy' is true, instead of calling $.ajax.call(gridDOMobj, o, i) we call // gridDOMobj.p.dataProxy.call(gridDOMobj, o, i) // // Behavior is extremely similar to when 'datatype' is a function, but arguments are slightly different. // Normally the following is fed to datatype.call(a, b, c): // a = Pointer to grid's table DOM element, b = grid.p.postdata, c = "load_"+grid's ID // In cases of "edit" and "del" the following is fed: // a = Pointer to grid's table DOM element (same), // b = extended Ajax Options including postdata in "data" property. 
(different object type) // c = "set_"+grid's ID in case of "edit" and "del_"+grid's ID in case of "del" (same type, different content) // The major difference is that complete ajax options object, with attached "complete" and "error" // callback functions is fed instead of only post data. // This allows you to emulate a $.ajax call (including calling "complete"/"error"), // while retrieving the data locally in the browser. useDataProxy: false, overlay : true }, $.jgrid.search, p || {}); return this.each(function() { var $t = this; if(!$t.grid) {return;} var fid = "fbox_"+$t.p.id, showFrm = true; function applyDefaultFilters(gridDOMobj, filterSettings) { /* gridDOMobj = ointer to grid DOM object ( $(#list)[0] ) What we need from gridDOMobj: gridDOMobj.SearchFilter is the pointer to the Search box, once it's created. gridDOMobj.p.postData - dictionary of post settings. These can be overriden at grid creation to contain default filter settings. We will parse these and will populate the search with defaults. filterSettings - same settings object you (would) pass to $().jqGrid('searchGrid', filterSettings); */ // Pulling default filter settings out of postData property of grid's properties.: var defaultFilters = gridDOMobj.p.postData[filterSettings.sFilter]; // example of what we might get: {"groupOp":"and","rules":[{"field":"amount","op":"eq","data":"100"}]} // suppose we have imported this with grid import, the this is a string. if(typeof(defaultFilters) == "string") { defaultFilters = $.jgrid.parse(defaultFilters); } if (defaultFilters) { if (defaultFilters.groupOp) { gridDOMobj.SearchFilter.setGroupOp(defaultFilters.groupOp); } if (defaultFilters.rules) { var f, i = 0, li = defaultFilters.rules.length, success = false; for (; i < li; i++) { f = defaultFilters.rules[i]; // we are not trying to counter all issues with filter declaration here. Just the basics to avoid lookup exceptions. 
if (f.field !== undefined && f.op !== undefined && f.data !== undefined) { success = gridDOMobj.SearchFilter.setFilter({ 'sfref':gridDOMobj.SearchFilter.$.find(".sf:last"), 'filter':$.extend({},f) }); if (success) { gridDOMobj.SearchFilter.add(); } } } } } } // end of applyDefaultFilters function hideFilter(selector) { if(p.onClose){ var fclm = p.onClose(selector); if(typeof fclm == 'boolean' && !fclm) { return; } } selector.hide(); if(p.overlay === true) { $(".jqgrid-overlay:first","#gbox_"+$t.p.id).hide(); } } function showFilter(){ var fl = $(".ui-searchFilter").length; if(fl > 1) { var zI = $("#"+fid).css("zIndex"); $("#"+fid).css({zIndex:parseInt(zI,10)+fl}); } $("#"+fid).show(); if(p.overlay === true) { $(".jqgrid-overlay:first","#gbox_"+$t.p.id).show(); } try{$(':input:visible',"#"+fid)[0].focus();}catch(_){} } function searchFilters(filters) { var hasFilters = (filters !== undefined), grid = $("#"+$t.p.id), sdata={}; if(p.multipleSearch===false) { sdata[p.sField] = filters.rules[0].field; sdata[p.sValue] = filters.rules[0].data; sdata[p.sOper] = filters.rules[0].op; if(sdata.hasOwnProperty(p.sFilter) ) { delete sdata[p.sFilter]; } } else { sdata[p.sFilter] = filters; $.each([p.sField, p.sValue, p.sOper], function(i, n){ if(sdata.hasOwnProperty(n)) { delete sdata[n];} }); } grid[0].p.search = hasFilters; $.extend(grid[0].p.postData,sdata); grid.trigger("reloadGrid",[{page:1}]); if(p.closeAfterSearch) { hideFilter($("#"+fid)); } } function resetFilters(op) { var reload = op && op.hasOwnProperty("reload") ? 
op.reload : true, grid = $("#"+$t.p.id), sdata={}; grid[0].p.search = false; if(p.multipleSearch===false) { sdata[p.sField] = sdata[p.sValue] = sdata[p.sOper] = ""; } else { sdata[p.sFilter] = ""; } $.extend(grid[0].p.postData,sdata); if(reload) { grid.trigger("reloadGrid",[{page:1}]); } if(p.closeAfterReset) { hideFilter($("#"+fid)); } } if($.fn.searchFilter) { if(p.recreateFilter===true) {$("#"+fid).remove();} if( $("#"+fid).html() != null ) { if ( $.isFunction(p.beforeShowSearch) ) { showFrm = p.beforeShowSearch($("#"+fid)); if(typeof(showFrm) == "undefined") { showFrm = true; } } if(showFrm === false) { return; } showFilter(); if( $.isFunction(p.afterShowSearch) ) { p.afterShowSearch($("#"+fid)); } } else { var fields = [], colNames = $("#"+$t.p.id).jqGrid("getGridParam","colNames"), colModel = $("#"+$t.p.id).jqGrid("getGridParam","colModel"), stempl = ['eq','ne','lt','le','gt','ge','bw','bn','in','ni','ew','en','cn','nc'], j,pos,k,oprtr=[]; if (p.sopt !==null) { k=0; for(j=0;j<p.sopt.length;j++) { if( (pos= $.inArray(p.sopt[j],stempl)) != -1 ){ oprtr[k] = {op:p.sopt[j],text: p.odata[pos].text}; k++; } } } else { for(j=0;j<stempl.length;j++) { oprtr[j] = {op:stempl[j],text: p.odata[j].text}; } } $.each(colModel, function(i, v) { var searchable = (typeof v.search === 'undefined') ? 
true: v.search , hidden = (v.hidden === true), soptions = $.extend({}, {text: colNames[i], itemval: v.index || v.name}, this.searchoptions), ignoreHiding = (soptions.searchhidden === true); if(typeof soptions.sopt !== 'undefined') { k=0; soptions.ops =[]; if(soptions.sopt.length>0) { for(j=0;j<soptions.sopt.length;j++) { if( (pos= $.inArray(soptions.sopt[j],stempl)) != -1 ){ soptions.ops[k] = {op:soptions.sopt[j],text: p.odata[pos].text}; k++; } } } } if(typeof(this.stype) === 'undefined') { this.stype='text'; } if(this.stype == 'select') { if ( soptions.dataUrl !== undefined) {} else { var eov; if(soptions.value) { eov = soptions.value; } else if(this.editoptions) { eov = this.editoptions.value; } if(eov) { soptions.dataValues =[]; if(typeof(eov) === 'string') { var so = eov.split(";"),sv; for(j=0;j<so.length;j++) { sv = so[j].split(":"); soptions.dataValues[j] ={value:sv[0],text:sv[1]}; } } else if (typeof(eov) === 'object') { j=0; for (var key in eov) { if(eov.hasOwnProperty(key)) { soptions.dataValues[j] ={value:key,text:eov[key]}; j++; } } } } } } if ((ignoreHiding && searchable) || (searchable && !hidden)) { fields.push(soptions); } }); if(fields.length>0){ $("<div id='"+fid+"' role='dialog' tabindex='-1'></div>").insertBefore("#gview_"+$t.p.id); // Before we create searchFilter we need to decide if we want to get back a string or a JS object. // see http://github.com/tonytomov/jqGrid/issues/#issue/36 for background on the issue. // If p.stringResult is defined, it was explisitly passed to us by user. Honor the choice, whatever it is. if (p.stringResult===undefined) { // to provide backward compatibility, inferring stringResult value from multipleSearch p.stringResult = p.multipleSearch; } // we preserve the return value here to retain access to .add() and other good methods of search form. 
$t.SearchFilter = $("#"+fid).searchFilter(fields, { groupOps: p.groupOps, operators: oprtr, onClose:hideFilter, resetText: p.Reset, searchText: p.Find, windowTitle: p.caption, rulesText:p.rulesText, matchText:p.matchText, onSearch: searchFilters, onReset: resetFilters,stringResult:p.stringResult, ajaxSelectOptions: $.extend({},$.jgrid.ajaxOptions,$t.p.ajaxSelectOptions ||{}), clone: p.cloneSearchRowOnAdd }); $(".ui-widget-overlay","#"+fid).remove(); if($t.p.direction=="rtl") { $(".ui-closer","#"+fid).css("float","left"); } if (p.drag===true) { $("#"+fid+" table thead tr:first td:first").css('cursor','move'); if(jQuery.fn.jqDrag) { $("#"+fid).jqDrag($("#"+fid+" table thead tr:first td:first")); } else { try { $("#"+fid).draggable({handle: $("#"+fid+" table thead tr:first td:first")}); } catch (e) {} } } if(p.multipleSearch === false) { $(".ui-del, .ui-add, .ui-del, .ui-add-last, .matchText, .rulesText", "#"+fid).hide(); $("select[name='groupOp']","#"+fid).hide(); } if (p.multipleSearch === true && p.loadDefaults === true) { applyDefaultFilters($t, p); } if ( $.isFunction(p.onInitializeSearch) ) { p.onInitializeSearch( $("#"+fid) ); } if ( $.isFunction(p.beforeShowSearch) ) { showFrm = p.beforeShowSearch($("#"+fid)); if(typeof(showFrm) == "undefined") { showFrm = true; } } if(showFrm === false) { return; } showFilter(); if( $.isFunction(p.afterShowSearch) ) { p.afterShowSearch($("#"+fid)); } if(p.closeOnEscape===true){ $("#"+fid).keydown( function( e ) { if( e.which == 27 ) { hideFilter($("#"+fid)); } if (e.which == 13) { $(".ui-search", this).click(); } }); } } } } }); }, // methods taken from grid.custom. 
updateGridRows : function (data, rowidname, jsonreader) { var nm, success=false, title; this.each(function(){ var t = this, vl, ind, srow, sid; if(!t.grid) {return false;} if(!rowidname) { rowidname = "id"; } if( data && data.length >0 ) { $(data).each(function(j){ srow = this; ind = t.rows.namedItem(srow[rowidname]); if(ind) { sid = srow[rowidname]; if(jsonreader === true){ if(t.p.jsonReader.repeatitems === true) { if(t.p.jsonReader.cell) {srow = srow[t.p.jsonReader.cell];} for (var k=0;k<srow.length;k++) { vl = t.formatter( sid, srow[k], k, srow, 'edit'); title = t.p.colModel[k].title ? {"title":$.jgrid.stripHtml(vl)} : {}; if(t.p.treeGrid===true && nm == t.p.ExpandColumn) { $("td:eq("+k+") > span:first",ind).html(vl).attr(title); } else { $("td:eq("+k+")",ind).html(vl).attr(title); } } success = true; return true; } } $(t.p.colModel).each(function(i){ nm = jsonreader===true ? this.jsonmap || this.name :this.name; if( srow[nm] !== undefined) { vl = t.formatter( sid, srow[nm], i, srow, 'edit'); title = this.title ? {"title":$.jgrid.stripHtml(vl)} : {}; if(t.p.treeGrid===true && nm == t.p.ExpandColumn) { $("td:eq("+i+") > span:first",ind).html(vl).attr(title); } else { $("td:eq("+i+")",ind).html(vl).attr(title); } success = true; } }); } }); } }); return success; }, // Form search - sorry for this method. Instead use ne jqFilter method. filterGrid : function(gridid,p){ p = $.extend({ gridModel : false, gridNames : false, gridToolbar : false, filterModel: [], // label/name/stype/defval/surl/sopt formtype : "horizontal", // horizontal/vertical autosearch: true, // if set to false a serch button should be enabled. 
formclass: "filterform", tableclass: "filtertable", buttonclass: "filterbutton", searchButton: "Search", clearButton: "Clear", enableSearch : false, enableClear: false, beforeSearch: null, afterSearch: null, beforeClear: null, afterClear: null, url : '', marksearched: true },p || {}); return this.each(function(){ var self = this; this.p = p; if(this.p.filterModel.length === 0 && this.p.gridModel===false) { alert("No filter is set"); return;} if( !gridid) {alert("No target grid is set!"); return;} this.p.gridid = gridid.indexOf("#") != -1 ? gridid : "#"+gridid; var gcolMod = $(this.p.gridid).jqGrid("getGridParam",'colModel'); if(gcolMod) { if( this.p.gridModel === true) { var thegrid = $(this.p.gridid)[0]; var sh; // we should use the options search, edittype, editoptions // additionally surl and defval can be added in grid colModel $.each(gcolMod, function (i,n) { var tmpFil = []; this.search = this.search === false ? false : true; if(this.editrules && this.editrules.searchhidden === true) { sh = true; } else { if(this.hidden === true ) { sh = false; } else { sh = true; } } if( this.search === true && sh === true) { if(self.p.gridNames===true) { tmpFil.label = thegrid.p.colNames[i]; } else { tmpFil.label = ''; } tmpFil.name = this.name; tmpFil.index = this.index || this.name; // we support only text and selects, so all other to text tmpFil.stype = this.edittype || 'text'; if(tmpFil.stype != 'select' ) { tmpFil.stype = 'text'; } tmpFil.defval = this.defval || ''; tmpFil.surl = this.surl || ''; tmpFil.sopt = this.editoptions || {}; tmpFil.width = this.width; self.p.filterModel.push(tmpFil); } }); } else { $.each(self.p.filterModel,function(i,n) { for(var j=0;j<gcolMod.length;j++) { if(this.name == gcolMod[j].name) { this.index = gcolMod[j].index || this.name; break; } } if(!this.index) { this.index = this.name; } }); } } else { alert("Could not get grid colModel"); return; } var triggerSearch = function() { var sdata={}, j=0, v; var gr = $(self.p.gridid)[0], nm; 
gr.p.searchdata = {}; if($.isFunction(self.p.beforeSearch)){self.p.beforeSearch();} $.each(self.p.filterModel,function(i,n){ nm = this.index; if(this.stype === 'select') { v = $("select[name="+nm+"]",self).val(); if(v) { sdata[nm] = v; if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).addClass("dirty-cell"); } j++; } else { if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).removeClass("dirty-cell"); } try { delete gr.p.postData[this.index]; } catch (e) {} } } else { v = $("input[name="+nm+"]",self).val(); if(v) { sdata[nm] = v; if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).addClass("dirty-cell"); } j++; } else { if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).removeClass("dirty-cell"); } try { delete gr.p.postData[this.index]; } catch(x) {} } } }); var sd = j>0 ? true : false; $.extend(gr.p.postData,sdata); var saveurl; if(self.p.url) { saveurl = $(gr).jqGrid("getGridParam",'url'); $(gr).jqGrid("setGridParam",{url:self.p.url}); } $(gr).jqGrid("setGridParam",{search:sd}).trigger("reloadGrid",[{page:1}]); if(saveurl) {$(gr).jqGrid("setGridParam",{url:saveurl});} if($.isFunction(self.p.afterSearch)){self.p.afterSearch();} }; var clearSearch = function(){ var sdata={}, v, j=0; var gr = $(self.p.gridid)[0], nm; if($.isFunction(self.p.beforeClear)){self.p.beforeClear();} $.each(self.p.filterModel,function(i,n){ nm = this.index; v = (this.defval) ? 
this.defval : ""; if(!this.stype){this.stype='text';} switch (this.stype) { case 'select' : var v1; $("select[name="+nm+"] option",self).each(function (i){ if(i===0) { this.selected = true; } if ($(this).text() == v) { this.selected = true; v1 = $(this).val(); return false; } }); if(v1) { // post the key and not the text sdata[nm] = v1; if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).addClass("dirty-cell"); } j++; } else { if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).removeClass("dirty-cell"); } try { delete gr.p.postData[this.index]; } catch (e) {} } break; case 'text': $("input[name="+nm+"]",self).val(v); if(v) { sdata[nm] = v; if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).addClass("dirty-cell"); } j++; } else { if(self.p.marksearched){ $("#jqgh_"+this.name,gr.grid.hDiv).removeClass("dirty-cell"); } try { delete gr.p.postData[this.index]; } catch (k) {} } break; } }); var sd = j>0 ? true : false; $.extend(gr.p.postData,sdata); var saveurl; if(self.p.url) { saveurl = $(gr).jqGrid("getGridParam",'url'); $(gr).jqGrid("setGridParam",{url:self.p.url}); } $(gr).jqGrid("setGridParam",{search:sd}).trigger("reloadGrid",[{page:1}]); if(saveurl) {$(gr).jqGrid("setGridParam",{url:saveurl});} if($.isFunction(self.p.afterClear)){self.p.afterClear();} }; var tbl; var formFill = function(){ var tr = document.createElement("tr"); var tr1, sb, cb,tl,td; if(self.p.formtype=='horizontal'){ $(tbl).append(tr); } $.each(self.p.filterModel,function(i,n){ tl = document.createElement("td"); $(tl).append("<label for='"+this.name+"'>"+this.label+"</label>"); td = document.createElement("td"); var $t=this; if(!this.stype) { this.stype='text';} switch (this.stype) { case "select": if(this.surl) { // data returned should have already constructed html select $(td).load(this.surl,function(){ if($t.defval) { $("select",this).val($t.defval); } $("select",this).attr({name:$t.index || $t.name, id: "sg_"+$t.name}); if($t.sopt) { $("select",this).attr($t.sopt); 
} if(self.p.gridToolbar===true && $t.width) { $("select",this).width($t.width); } if(self.p.autosearch===true){ $("select",this).change(function(e){ triggerSearch(); return false; }); } }); } else { // sopt to construct the values if($t.sopt.value) { var oSv = $t.sopt.value; var elem = document.createElement("select"); $(elem).attr({name:$t.index || $t.name, id: "sg_"+$t.name}).attr($t.sopt); var so, sv, ov; if(typeof oSv === "string") { so = oSv.split(";"); for(var k=0; k<so.length;k++){ sv = so[k].split(":"); ov = document.createElement("option"); ov.value = sv[0]; ov.innerHTML = sv[1]; if (sv[1]==$t.defval) { ov.selected ="selected"; } elem.appendChild(ov); } } else if(typeof oSv === "object" ) { for ( var key in oSv) { if(oSv.hasOwnProperty(key)) { i++; ov = document.createElement("option"); ov.value = key; ov.innerHTML = oSv[key]; if (oSv[key]==$t.defval) { ov.selected ="selected"; } elem.appendChild(ov); } } } if(self.p.gridToolbar===true && $t.width) { $(elem).width($t.width); } $(td).append(elem); if(self.p.autosearch===true){ $(elem).change(function(e){ triggerSearch(); return false; }); } } } break; case 'text': var df = this.defval ? this.defval: ""; $(td).append("<input type='text' name='"+(this.index || this.name)+"' id='sg_"+this.name+"' value='"+df+"'/>"); if($t.sopt) { $("input",td).attr($t.sopt); } if(self.p.gridToolbar===true && $t.width) { if($.browser.msie) { $("input",td).width($t.width-4); } else { $("input",td).width($t.width-2); } } if(self.p.autosearch===true){ $("input",td).keypress(function(e){ var key = e.charCode ? e.charCode : e.keyCode ? 
e.keyCode : 0; if(key == 13){ triggerSearch(); return false; } return this; }); } break; } if(self.p.formtype=='horizontal'){ if(self.p.gridToolbar===true && self.p.gridNames===false) { $(tr).append(td); } else { $(tr).append(tl).append(td); } $(tr).append(td); } else { tr1 = document.createElement("tr"); $(tr1).append(tl).append(td); $(tbl).append(tr1); } }); td = document.createElement("td"); if(self.p.enableSearch === true){ sb = "<input type='button' id='sButton' class='"+self.p.buttonclass+"' value='"+self.p.searchButton+"'/>"; $(td).append(sb); $("input#sButton",td).click(function(){ triggerSearch(); return false; }); } if(self.p.enableClear === true) { cb = "<input type='button' id='cButton' class='"+self.p.buttonclass+"' value='"+self.p.clearButton+"'/>"; $(td).append(cb); $("input#cButton",td).click(function(){ clearSearch(); return false; }); } if(self.p.enableClear === true || self.p.enableSearch === true) { if(self.p.formtype=='horizontal') { $(tr).append(td); } else { tr1 = document.createElement("tr"); $(tr1).append("<td> </td>").append(td); $(tbl).append(tr1); } } }; var frm = $("<form name='SearchForm' style=display:inline;' class='"+this.p.formclass+"'></form>"); tbl =$("<table class='"+this.p.tableclass+"' cellspacing='0' cellpadding='0' border='0'><tbody></tbody></table>"); $(frm).append(tbl); formFill(); $(this).append(frm); this.triggerSearch = triggerSearch; this.clearSearch = clearSearch; }); } }); })(jQuery); searchFilter.css000064400000000611151677265110007702 0ustar00.ui-searchFilter { display: none; position: absolute; z-index: 770; overflow: visible;} .ui-searchFilter table {position:relative; margin:0em; width:auto} .ui-searchFilter table td {margin: 0em; padding: 1px;} .ui-searchFilter table td input, .ui-searchFilter table td select {margin: 0.1em;} .ui-searchFilter .ui-state-default { cursor: pointer; } .ui-searchFilter .divider hr {margin: 1px; }jquery.searchFilter.js000064400000111631151677265110011051 0ustar00/* Plugin: 
searchFilter v1.2.9 * Author: Kasey Speakman (kasey@cornerspeed.com) * License: Dual Licensed, MIT and GPL v2 (http://www.gnu.org/licenses/gpl-2.0.html) * * REQUIREMENTS: * jQuery 1.3+ (http://jquery.com/) * A Themeroller Theme (http://jqueryui.com/themeroller/) * * SECURITY WARNING * You should always implement server-side checking to ensure that * the query will fail when forged/invalid data is received. * Clever users can send any value they want through JavaScript and HTTP POST/GET. * * THEMES * Simply include the CSS file for your Themeroller theme. * * DESCRIPTION * This plugin creates a new searchFilter object in the specified container * * INPUT TYPE * fields: an array of field objects. each object has the following properties: * text: a string containing the display name of the field (e.g. "Field 1") * itemval: a string containing the actual field name (e.g. "field1") * optional properties: * ops: an array of operators in the same format as jQuery.fn.searchFilter.defaults.operators * that is: [ { op: 'gt', text: 'greater than'}, { op:'lt', text: 'less than'}, ... ] * if not specified, the passed-in options used, and failting that, jQuery.fn.searchFilter.defaults.operators will be used * *** NOTE *** * Specifying a dataUrl or dataValues property means that a <select ...> (drop-down-list) will be generated * instead of a text input <input type='text'.../> where the user would normally type in their search data * ************ * dataUrl: a url that will return the html select for this field, this url will only be called once for this field * dataValues: the possible values for this field in the form [ { text: 'Data Display Text', value: 'data_actual_value' }, { ... } ] * dataInit: a function that you can use to initialize the data field. this function is passed the jQuery-fied data element * dataEvents: list of events to apply to the data element. 
uses $("#id").bind(type, [data], fn) to bind events to data element * *** JSON of this object could look like this: *** * var fields = [ * { * text: 'Field Display Name', * itemval: 'field_actual_name', * // below this are optional values * ops: [ // this format is the same as jQuery.fn.searchFilter.defaults.operators * { op: 'gt', text: 'greater than' }, * { op: 'lt', text: 'less than' } * ], * dataUrl: 'http://server/path/script.php?propName=propValue', // using this creates a select for the data input instead of an input type='text' * dataValues: [ // using this creates a select for the data input instead of an input type='text' * { text: 'Data Value Display Name', value: 'data_actual_value' }, * { ... } * ], * dataInit: function(jElem) { jElem.datepicker(options); }, * dataEvents: [ // these are the same options that you pass to $("#id").bind(type, [data], fn) * { type: 'click', data: { i: 7 }, fn: function(e) { console.log(e.data.i); } }, * { type: 'keypress', fn: function(e) { console.log('keypress'); } } * ] * }, * { ... } * ] * options: name:value properties containing various creation options * see jQuery.fn.searchFilter.defaults for the overridable options * * RETURN TYPE: This plugin returns a SearchFilter object, which has additional SearchFilter methods: * Methods * add: Adds a filter. added to the end of the list unless a jQuery event object or valid row number is passed. * del: Removes a filter. removed from the end of the list unless a jQuery event object or valid row number is passed. * reset: resets filters back to original state (only one blank filter), and calls onReset * search: puts the search rules into an object and calls onSearch with it * close: calls the onClose event handler * * USAGE * HTML * <head> * ... 
* <script src="path/to/jquery.min.js" type="text/javascript"></script> * <link href="path/to/themeroller.css" rel="Stylesheet" type="text/css" /> * <script src="path/to/jquery.searchFilter.js" type="text/javascript"></script> * <link href="path/to/jquery.searchFilter.css" rel="Stylesheet" type="text/css" /> * ... * </head> * <body> * ... * <div id='mySearch'></div> * ... * </body> * JQUERY * Methods * initializing: $("#mySearch").searchFilter([{text: "Field 1", value: "field1"},{text: "Field 2", value: "field2"}], {onSearch: myFilterRuleReceiverFn, onReset: myFilterResetFn }); * Manual Methods (there's no need to call these methods unless you are trying to manipulate searchFilter with script) * add: $("#mySearch").searchFilter().add(); // appends a blank filter * $("#mySearch").searchFilter().add(0); // copies the first filter as second * del: $("#mySearch").searchFilter().del(); // removes the bottom filter * $("#mySearch").searchFilter().del(1); // removes the second filter * search: $("#mySearch").searchFilter().search(); // invokes onSearch, passing it a ruleGroup object * reset: $("#mySearch").searchFilter().reset(); // resets rules and invokes onReset * close: $("#mySearch").searchFilter().close(); // without an onClose handler, equivalent to $("#mySearch").hide(); * * NOTE: You can get the jQuery object back from the SearchFilter object by chaining .$ * Example * $("#mySearch").searchFilter().add().add().reset().$.hide(); * Verbose Example * $("#mySearch") // gets jQuery object for the HTML element with id="mySearch" * .searchFilter() // gets the SearchFilter object for an existing search filter * .add() // adds a new filter to the end of the list * .add() // adds another new filter to the end of the list * .reset() // resets filters back to original state, triggers onReset * .$ // returns jQuery object for $("#mySearch") * .hide(); // equivalent to $("#mySearch").hide(); */ jQuery.fn.searchFilter = function(fields, options) { function SearchFilter(jQ, 
fields, options) { //--------------------------------------------------------------- // PUBLIC VARS //--------------------------------------------------------------- this.$ = jQ; // makes the jQuery object available as .$ from the return value //--------------------------------------------------------------- // PUBLIC FUNCTIONS //--------------------------------------------------------------- this.add = function(i) { if (i == null) jQ.find(".ui-add-last").click(); else jQ.find(".sf:eq(" + i + ") .ui-add").click(); return this; }; this.del = function(i) { if (i == null) jQ.find(".sf:last .ui-del").click(); else jQ.find(".sf:eq(" + i + ") .ui-del").click(); return this; }; this.search = function(e) { jQ.find(".ui-search").click(); return this; }; this.reset = function(o) { if(o===undefined) o = false; jQ.find(".ui-reset").trigger('click',[o]); return this; }; this.close = function() { jQ.find(".ui-closer").click(); return this; }; //--------------------------------------------------------------- // "CONSTRUCTOR" (in air quotes) //--------------------------------------------------------------- if (fields != null) { // type coercion matches undefined as well as null //--------------------------------------------------------------- // UTILITY FUNCTIONS //--------------------------------------------------------------- function hover() { jQuery(this).toggleClass("ui-state-hover"); return false; } function active(e) { jQuery(this).toggleClass("ui-state-active", (e.type == "mousedown")); return false; } function buildOpt(value, text) { return "<option value='" + value + "'>" + text + "</option>"; } function buildSel(className, options, isHidden) { return "<select class='" + className + "'" + (isHidden ? 
" style='display:none;'" : "") + ">" + options + "</select>"; } function initData(selector, fn) { var jElem = jQ.find("tr.sf td.data " + selector); if (jElem[0] != null) fn(jElem); } function bindDataEvents(selector, events) { var jElem = jQ.find("tr.sf td.data " + selector); if (jElem[0] != null) { jQuery.each(events, function() { if (this.data != null) jElem.bind(this.type, this.data, this.fn); else jElem.bind(this.type, this.fn); }); } } //--------------------------------------------------------------- // SUPER IMPORTANT PRIVATE VARS //--------------------------------------------------------------- // copies jQuery.fn.searchFilter.defaults.options properties onto an empty object, then options onto that var opts = jQuery.extend({}, jQuery.fn.searchFilter.defaults, options); // this is keeps track of the last asynchronous setup var highest_late_setup = -1; //--------------------------------------------------------------- // CREATION PROCESS STARTS //--------------------------------------------------------------- // generate the global ops var gOps_html = ""; jQuery.each(opts.groupOps, function() { gOps_html += buildOpt(this.op, this.text); }); gOps_html = "<select name='groupOp'>" + gOps_html + "</select>"; /* original content - doesn't minify very well jQ .html("") // clear any old content .addClass("ui-searchFilter") // add classes .append( // add content "\ <div class='ui-widget-overlay' style='z-index: -1'> </div>\ <table class='ui-widget-content ui-corner-all'>\ <thead>\ <tr>\ <td colspan='5' class='ui-widget-header ui-corner-all' style='line-height: 18px;'>\ <div class='ui-closer ui-state-default ui-corner-all ui-helper-clearfix' style='float: right;'>\ <span class='ui-icon ui-icon-close'></span>\ </div>\ " + opts.windowTitle + "\ </td>\ </tr>\ </thead>\ <tbody>\ <tr class='sf'>\ <td class='fields'></td>\ <td class='ops'></td>\ <td class='data'></td>\ <td><div class='ui-del ui-state-default ui-corner-all'><span class='ui-icon 
ui-icon-minus'></span></div></td>\ <td><div class='ui-add ui-state-default ui-corner-all'><span class='ui-icon ui-icon-plus'></span></div></td>\ </tr>\ <tr>\ <td colspan='5' class='divider'><div> </div></td>\ </tr>\ </tbody>\ <tfoot>\ <tr>\ <td colspan='3'>\ <span class='ui-reset ui-state-default ui-corner-all' style='display: inline-block; float: left;'><span class='ui-icon ui-icon-arrowreturnthick-1-w' style='float: left;'></span><span style='line-height: 18px; padding: 0 7px 0 3px;'>" + opts.resetText + "</span></span>\ <span class='ui-search ui-state-default ui-corner-all' style='display: inline-block; float: right;'><span class='ui-icon ui-icon-search' style='float: left;'></span><span style='line-height: 18px; padding: 0 7px 0 3px;'>" + opts.searchText + "</span></span>\ <span class='matchText'>" + opts.matchText + "</span> \ " + gOps_html + " \ <span class='rulesText'>" + opts.rulesText + "</span>\ </td>\ <td> </td>\ <td><div class='ui-add-last ui-state-default ui-corner-all'><span class='ui-icon ui-icon-plusthick'></span></div></td>\ </tr>\ </tfoot>\ </table>\ "); /* end hard-to-minify code */ /* begin easier to minify code */ jQ.html("").addClass("ui-searchFilter").append("<div class='ui-widget-overlay' style='z-index: -1'> </div><table class='ui-widget-content ui-corner-all'><thead><tr><td colspan='5' class='ui-widget-header ui-corner-all' style='line-height: 18px;'><div class='ui-closer ui-state-default ui-corner-all ui-helper-clearfix' style='float: right;'><span class='ui-icon ui-icon-close'></span></div>" + opts.windowTitle + "</td></tr></thead><tbody><tr class='sf'><td class='fields'></td><td class='ops'></td><td class='data'></td><td><div class='ui-del ui-state-default ui-corner-all'><span class='ui-icon ui-icon-minus'></span></div></td><td><div class='ui-add ui-state-default ui-corner-all'><span class='ui-icon ui-icon-plus'></span></div></td></tr><tr><td colspan='5' class='divider'><hr class='ui-widget-content' 
style='margin:1px'/></td></tr></tbody><tfoot><tr><td colspan='3'><span class='ui-reset ui-state-default ui-corner-all' style='display: inline-block; float: left;'><span class='ui-icon ui-icon-arrowreturnthick-1-w' style='float: left;'></span><span style='line-height: 18px; padding: 0 7px 0 3px;'>" + opts.resetText + "</span></span><span class='ui-search ui-state-default ui-corner-all' style='display: inline-block; float: right;'><span class='ui-icon ui-icon-search' style='float: left;'></span><span style='line-height: 18px; padding: 0 7px 0 3px;'>" + opts.searchText + "</span></span><span class='matchText'>" + opts.matchText + "</span> " + gOps_html + " <span class='rulesText'>" + opts.rulesText + "</span></td><td> </td><td><div class='ui-add-last ui-state-default ui-corner-all'><span class='ui-icon ui-icon-plusthick'></span></div></td></tr></tfoot></table>"); /* end easier-to-minify code */ var jRow = jQ.find("tr.sf"); var jFields = jRow.find("td.fields"); var jOps = jRow.find("td.ops"); var jData = jRow.find("td.data"); // generate the defaults var default_ops_html = ""; jQuery.each(opts.operators, function() { default_ops_html += buildOpt(this.op, this.text); }); default_ops_html = buildSel("default", default_ops_html, true); jOps.append(default_ops_html); var default_data_html = "<input type='text' class='default' style='display:none;' />"; jData.append(default_data_html); // generate the field list as a string var fields_html = ""; var has_custom_ops = false; var has_custom_data = false; jQuery.each(fields, function(i) { var field_num = i; fields_html += buildOpt(this.itemval, this.text); // add custom ops if they exist if (this.ops != null) { has_custom_ops = true; var custom_ops = ""; jQuery.each(this.ops, function() { custom_ops += buildOpt(this.op, this.text); }); custom_ops = buildSel("field" + field_num, custom_ops, true); jOps.append(custom_ops); } // add custom data if it is given if (this.dataUrl != null) { if (i > highest_late_setup) 
highest_late_setup = i; has_custom_data = true; var dEvents = this.dataEvents; var iEvent = this.dataInit; var bs = this.buildSelect; jQuery.ajax(jQuery.extend({ url : this.dataUrl, complete: function(data) { var $d; if(bs != null) $d =jQuery("<div />").append(bs(data)); else $d = jQuery("<div />").append(data.responseText); $d.find("select").addClass("field" + field_num).hide(); jData.append($d.html()); if (iEvent) initData(".field" + i, iEvent); if (dEvents) bindDataEvents(".field" + i, dEvents); if (i == highest_late_setup) { // change should get called no more than twice when this searchFilter is constructed jQ.find("tr.sf td.fields select[name='field']").change(); } } },opts.ajaxSelectOptions)); } else if (this.dataValues != null) { has_custom_data = true; var custom_data = ""; jQuery.each(this.dataValues, function() { custom_data += buildOpt(this.value, this.text); }); custom_data = buildSel("field" + field_num, custom_data, true); jData.append(custom_data); } else if (this.dataEvents != null || this.dataInit != null) { has_custom_data = true; var custom_data = "<input type='text' class='field" + field_num + "' />"; jData.append(custom_data); } // attach events to data if they exist if (this.dataInit != null && i != highest_late_setup) initData(".field" + i, this.dataInit); if (this.dataEvents != null && i != highest_late_setup) bindDataEvents(".field" + i, this.dataEvents); }); fields_html = "<select name='field'>" + fields_html + "</select>"; jFields.append(fields_html); // setup the field select with an on-change event if there are custom ops or data var jFSelect = jFields.find("select[name='field']"); if (has_custom_ops) jFSelect.change(function(e) { var index = e.target.selectedIndex; var td = jQuery(e.target).parents("tr.sf").find("td.ops"); td.find("select").removeAttr("name").hide(); // disown and hide all elements var jElem = td.find(".field" + index); if (jElem[0] == null) jElem = td.find(".default"); // if there's not an element for that field, use 
the default one jElem.attr("name", "op").show(); return false; }); else jOps.find(".default").attr("name", "op").show(); if (has_custom_data) jFSelect.change(function(e) { var index = e.target.selectedIndex; var td = jQuery(e.target).parents("tr.sf").find("td.data"); td.find("select,input").removeClass("vdata").hide(); // disown and hide all elements var jElem = td.find(".field" + index); if (jElem[0] == null) jElem = td.find(".default"); // if there's not an element for that field, use the default one jElem.show().addClass("vdata"); return false; }); else jData.find(".default").show().addClass("vdata"); // go ahead and call the change event and setup the ops and data values if (has_custom_ops || has_custom_data) jFSelect.change(); // bind events jQ.find(".ui-state-default").hover(hover, hover).mousedown(active).mouseup(active); // add hover/active effects to all buttons jQ.find(".ui-closer").click(function(e) { opts.onClose(jQuery(jQ.selector)); return false; }); jQ.find(".ui-del").click(function(e) { var row = jQuery(e.target).parents(".sf"); if (row.siblings(".sf").length > 0) { // doesn't remove if there's only one filter left if (opts.datepickerFix === true && jQuery.fn.datepicker !== undefined) row.find(".hasDatepicker").datepicker("destroy"); // clean up datepicker's $.data mess row.remove(); // also unbinds } else { // resets the filter if it's the last one row.find("select[name='field']")[0].selectedIndex = 0; row.find("select[name='op']")[0].selectedIndex = 0; row.find(".data input").val(""); // blank all input values row.find(".data select").each(function() { this.selectedIndex = 0; }); // select first option on all selects row.find("select[name='field']").change(function(event){event.stopPropagation();}); // trigger any change events } return false; }); jQ.find(".ui-add").click(function(e) { var row = jQuery(e.target).parents(".sf"); var newRow = row.clone(true).insertAfter(row); newRow.find(".ui-state-default").removeClass("ui-state-hover 
ui-state-active"); if (opts.clone) { newRow.find("select[name='field']")[0].selectedIndex = row.find("select[name='field']")[0].selectedIndex; var stupid_browser = (newRow.find("select[name='op']")[0] == null); // true for IE6 if (!stupid_browser) newRow.find("select[name='op']").focus()[0].selectedIndex = row.find("select[name='op']")[0].selectedIndex; var jElem = newRow.find("select.vdata"); if (jElem[0] != null) // select doesn't copy it's selected index when cloned jElem[0].selectedIndex = row.find("select.vdata")[0].selectedIndex; } else { newRow.find(".data input").val(""); // blank all input values newRow.find("select[name='field']").focus(); } if (opts.datepickerFix === true && jQuery.fn.datepicker !== undefined) { // using $.data to associate data with document elements is Not Good row.find(".hasDatepicker").each(function() { var settings = jQuery.data(this, "datepicker").settings; newRow.find("#" + this.id).unbind().removeAttr("id").removeClass("hasDatepicker").datepicker(settings); }); } newRow.find("select[name='field']").change(function(event){event.stopPropagation();} ); return false; }); jQ.find(".ui-search").click(function(e) { var ui = jQuery(jQ.selector); // pointer to search box wrapper element var ruleGroup; var group_op = ui.find("select[name='groupOp'] :selected").val(); // puls "AND" or "OR" if (!opts.stringResult) { ruleGroup = { groupOp: group_op, rules: [] }; } else { ruleGroup = "{\"groupOp\":\"" + group_op + "\",\"rules\":["; } ui.find(".sf").each(function(i) { var tField = jQuery(this).find("select[name='field'] :selected").val(); var tOp = jQuery(this).find("select[name='op'] :selected").val(); var tData = jQuery(this).find("input.vdata,select.vdata :selected").val(); tData += ""; if (!opts.stringResult) { ruleGroup.rules.push({ field: tField, op: tOp, data: tData }); } else { tData = tData.replace(/\\/g,'\\\\').replace(/\"/g,'\\"'); if (i > 0) ruleGroup += ","; ruleGroup += "{\"field\":\"" + tField + "\","; ruleGroup += "\"op\":\"" + 
tOp + "\","; ruleGroup += "\"data\":\"" + tData + "\"}"; } }); if (opts.stringResult) ruleGroup += "]}"; opts.onSearch(ruleGroup); return false; }); jQ.find(".ui-reset").click(function(e,op) { var ui = jQuery(jQ.selector); ui.find(".ui-del").click(); // removes all filters, resets the last one ui.find("select[name='groupOp']")[0].selectedIndex = 0; // changes the op back to the default one opts.onReset(op); return false; }); jQ.find(".ui-add-last").click(function() { var row = jQuery(jQ.selector + " .sf:last"); var newRow = row.clone(true).insertAfter(row); newRow.find(".ui-state-default").removeClass("ui-state-hover ui-state-active"); newRow.find(".data input").val(""); // blank all input values newRow.find("select[name='field']").focus(); if (opts.datepickerFix === true && jQuery.fn.datepicker !== undefined) { // using $.data to associate data with document elements is Not Good row.find(".hasDatepicker").each(function() { var settings = jQuery.data(this, "datepicker").settings; newRow.find("#" + this.id).unbind().removeAttr("id").removeClass("hasDatepicker").datepicker(settings); }); } newRow.find("select[name='field']").change(function(event){event.stopPropagation();}); return false; }); this.setGroupOp = function(setting) { /* a "setter" for groupping argument. * ("AND" or "OR") * * Inputs: * setting - a string * * Returns: * Does not return anything. May add success / failure reporting in future versions. * * author: Daniel Dotsenko (dotsa@hotmail.com) */ selDOMobj = jQ.find("select[name='groupOp']")[0]; var indexmap = {}, l = selDOMobj.options.length, i; for (i=0; i<l; i++) { indexmap[selDOMobj.options[i].value] = i; } selDOMobj.selectedIndex = indexmap[setting]; jQuery(selDOMobj).change(function(event){event.stopPropagation();}); }; this.setFilter = function(settings) { /* a "setter" for an arbitrary SearchFilter's filter line. * designed to abstract the DOM manipulations required to infer * a particular filter is a fit to the search box. 
* * Inputs: * settings - an "object" (dictionary) * index (optional*) (to be implemented in the future) : signed integer index (from top to bottom per DOM) of the filter line to fill. * Negative integers (rooted in -1 and lower) denote position of the line from the bottom. * sfref (optional*) : DOM object referencing individual '.sf' (normally a TR element) to be populated. (optional) * filter (mandatory) : object (dictionary) of form {'field':'field_value','op':'op_value','data':'data value'} * * * It is mandatory to have either index or sfref defined. * * Returns: * Does not return anything. May add success / failure reporting in future versions. * * author: Daniel Dotsenko (dotsa@hotmail.com) */ var o = settings['sfref'], filter = settings['filter']; // setting up valueindexmap that we will need to manipulate SELECT elements. var fields = [], i, j , l, lj, li, valueindexmap = {}; // example of valueindexmap: // {'field1':{'index':0,'ops':{'eq':0,'ne':1}},'fieldX':{'index':1,'ops':{'eq':0,'ne':1},'data':{'true':0,'false':1}}}, // if data is undefined it's a INPUT field. If defined, it's SELECT selDOMobj = o.find("select[name='field']")[0]; for (i=0, l=selDOMobj.options.length; i<l; i++) { valueindexmap[selDOMobj.options[i].value] = {'index':i,'ops':{}}; fields.push(selDOMobj.options[i].value); } for (i=0, li=fields.length; i < li; i++) { selDOMobj = o.find(".ops > select[class='field"+i+"']")[0]; if (selDOMobj) { for (j=0, lj=selDOMobj.options.length; j<lj; j++) { valueindexmap[fields[i]]['ops'][selDOMobj.options[j].value] = j; } } selDOMobj = o.find(".data > select[class='field"+i+"']")[0]; if (selDOMobj) { valueindexmap[fields[i]]['data'] = {}; // this setting is the flag that 'data' is contained in a SELECT for (j=0, lj=selDOMobj.options.length; j<lj; j++) { valueindexmap[fields[i]]['data'][selDOMobj.options[j].value] = j; } } } // done populating valueindexmap // preparsing the index values for SELECT elements. 
var fieldvalue, fieldindex, opindex, datavalue, dataindex; fieldvalue = filter['field']; if (valueindexmap[fieldvalue]) { fieldindex = valueindexmap[fieldvalue]['index']; } if (fieldindex != null) { opindex = valueindexmap[fieldvalue]['ops'][filter['op']]; if(opindex === undefined) { for(i=0,li=options.operators.length; i<li;i++) { if(options.operators[i].op == filter.op ){ opindex = i; break; } } } datavalue = filter['data']; if (valueindexmap[fieldvalue]['data'] == null) { dataindex = -1; // 'data' is not SELECT, Making the var 'defined' } else { dataindex = valueindexmap[fieldvalue]['data'][datavalue]; // 'undefined' may come from here. } } // only if values for 'field' and 'op' and 'data' are 'found' in mapping... if (fieldindex != null && opindex != null && dataindex != null) { o.find("select[name='field']")[0].selectedIndex = fieldindex; o.find("select[name='field']").change(); o.find("select[name='op']")[0].selectedIndex = opindex; o.find("input.vdata").val(datavalue); // if jquery does not find any INPUT, it does not set any. This means we deal with SELECT o = o.find("select.vdata")[0]; if (o) { o.selectedIndex = dataindex; } return true } else { return false } }; // end of this.setFilter fn } // end of if fields != null } return new SearchFilter(this, fields, options); }; jQuery.fn.searchFilter.version = '1.2.9'; /* This property contains the default options */ jQuery.fn.searchFilter.defaults = { /* * PROPERTY * TYPE: boolean * DESCRIPTION: clone a row if it is added from an existing row * when false, any new added rows will be blank. */ clone: true, /* * PROPERTY * TYPE: boolean * DESCRIPTION: current version of datepicker uses a data store, * which is incompatible with $().clone(true) */ datepickerFix: true, /* * FUNCTION * DESCRIPTION: the function that will be called when the user clicks Reset * INPUT TYPE: JS object if stringResult is false, otherwise is JSON string */ onReset: function(data) { alert("Reset Clicked. 
Data Returned: " + data) }, /* * FUNCTION * DESCRIPTION: the function that will be called when the user clicks Search * INPUT TYPE: JS object if stringResult is false, otherwise is JSON string */ onSearch: function(data) { alert("Search Clicked. Data Returned: " + data) }, /* * FUNCTION * DESCRIPTION: the function that will be called when the user clicks the Closer icon * or the close() function is called * if left null, it simply does a .hide() on the searchFilter * INPUT TYPE: a jQuery object for the searchFilter */ onClose: function(jElem) { jElem.hide(); }, /* * PROPERTY * TYPE: array of objects, each object has the properties op and text * DESCRIPTION: the selectable operators that are applied between rules * e.g. for {op:"AND", text:"all"} * the search filter box will say: match all rules * the server should interpret this as putting the AND op between each rule: * rule1 AND rule2 AND rule3 * text will be the option text, and op will be the option value */ groupOps: [ { op: "AND", text: "all" }, { op: "OR", text: "any" } ], /* * PROPERTY * TYPE: array of objects, each object has the properties op and text * DESCRIPTION: the operators that will appear as drop-down options * text will be the option text, and op will be the option value */ operators: [ { op: "eq", text: "is equal to" }, { op: "ne", text: "is not equal to" }, { op: "lt", text: "is less than" }, { op: "le", text: "is less or equal to" }, { op: "gt", text: "is greater than" }, { op: "ge", text: "is greater or equal to" }, { op: "in", text: "is in" }, { op: "ni", text: "is not in" }, { op: "bw", text: "begins with" }, { op: "bn", text: "does not begin with" }, { op: "ew", text: "ends with" }, { op: "en", text: "does not end with" }, { op: "cn", text: "contains" }, { op: "nc", text: "does not contain" } ], /* * PROPERTY * TYPE: string * DESCRIPTION: part of the phrase: _match_ ANY/ALL rules */ matchText: "match", /* * PROPERTY * TYPE: string * DESCRIPTION: part of the phrase: match ANY/ALL _rules_ */ 
rulesText: "rules", /* * PROPERTY * TYPE: string * DESCRIPTION: the text that will be displayed in the reset button */ resetText: "Reset", /* * PROPERTY * TYPE: string * DESCRIPTION: the text that will be displayed in the search button */ searchText: "Search", /* * PROPERTY * TYPE: boolean * DESCRIPTION: a flag that, when set, will make the onSearch and onReset return strings instead of objects */ stringResult: true, /* * PROPERTY * TYPE: string * DESCRIPTION: the title of the searchFilter window */ windowTitle: "Search Rules", /* * PROPERTY * TYPE: object * DESCRIPTION: options to extend the ajax request */ ajaxSelectOptions : {} }; /* end of searchFilter */jquery.contextmenu.js000064400000010322151677265110011002 0ustar00/* * ContextMenu - jQuery plugin for right-click context menus * * Author: Chris Domigan * Contributors: Dan G. Switzer, II * Parts of this plugin are inspired by Joern Zaefferer's Tooltip plugin * * Dual licensed under the MIT and GPL licenses: * http://www.opensource.org/licenses/mit-license.php * http://www.gnu.org/licenses/gpl.html * * Version: r2 * Date: 16 July 2007 * * For documentation visit http://www.trendskitchens.co.nz/jquery/contextmenu/ * */ (function($) { var menu, shadow, content, hash, currentTarget; var defaults = { menuStyle: { listStyle: 'none', padding: '1px', margin: '0px', backgroundColor: '#fff', border: '1px solid #999', width: '100px' }, itemStyle: { margin: '0px', color: '#000', display: 'block', cursor: 'default', padding: '3px', border: '1px solid #fff', backgroundColor: 'transparent' }, itemHoverStyle: { border: '1px solid #0a246a', backgroundColor: '#b6bdd2' }, eventPosX: 'pageX', eventPosY: 'pageY', shadow : true, onContextMenu: null, onShowMenu: null }; $.fn.contextMenu = function(id, options) { if (!menu) { // Create singleton menu menu = $('<div id="jqContextMenu"></div>') .hide() .css({position:'absolute', zIndex:'500'}) .appendTo('body') .bind('click', function(e) { e.stopPropagation(); }); } if (!shadow) { 
shadow = $('<div></div>') .css({backgroundColor:'#000',position:'absolute',opacity:0.2,zIndex:499}) .appendTo('body') .hide(); } hash = hash || []; hash.push({ id : id, menuStyle: $.extend({}, defaults.menuStyle, options.menuStyle || {}), itemStyle: $.extend({}, defaults.itemStyle, options.itemStyle || {}), itemHoverStyle: $.extend({}, defaults.itemHoverStyle, options.itemHoverStyle || {}), bindings: options.bindings || {}, shadow: options.shadow || options.shadow === false ? options.shadow : defaults.shadow, onContextMenu: options.onContextMenu || defaults.onContextMenu, onShowMenu: options.onShowMenu || defaults.onShowMenu, eventPosX: options.eventPosX || defaults.eventPosX, eventPosY: options.eventPosY || defaults.eventPosY }); var index = hash.length - 1; $(this).bind('contextmenu', function(e) { // Check if onContextMenu() defined var bShowContext = (!!hash[index].onContextMenu) ? hash[index].onContextMenu(e) : true; currentTarget = e.target; if (bShowContext) { display(index, this, e ); return false; } }); return this; }; function display(index, trigger, e ) { var cur = hash[index]; content = $('#'+cur.id).find('ul:first').clone(true); content.css(cur.menuStyle).find('li').css(cur.itemStyle).hover( function() { $(this).css(cur.itemHoverStyle); }, function(){ $(this).css(cur.itemStyle); } ).find('img').css({verticalAlign:'middle',paddingRight:'2px'}); // Send the content to the menu menu.html(content); // if there's an onShowMenu, run it now -- must run after content has been added // if you try to alter the content variable before the menu.html(), IE6 has issues // updating the content if (!!cur.onShowMenu) menu = cur.onShowMenu(e, menu); $.each(cur.bindings, function(id, func) { $('#'+id, menu).bind('click', function() { hide(); func(trigger, currentTarget); }); }); menu.css({'left':e[cur.eventPosX],'top':e[cur.eventPosY]}).show(); if (cur.shadow) shadow.css({width:menu.width(),height:menu.height(),left:e.pageX+2,top:e.pageY+2}).show(); 
$(document).one('click', hide); } function hide() { menu.hide(); shadow.hide(); } // Apply defaults $.contextMenu = { defaults : function(userDefaults) { $.each(userDefaults, function(i, val) { if (typeof val == 'object' && defaults[i]) { $.extend(defaults[i], val); } else defaults[i] = val; }); } }; })(jQuery); $(function() { $('div.contextMenu').hide(); });jquery.tablednd.js000064400000041014151677265110010210 0ustar00/** * TableDnD plug-in for JQuery, allows you to drag and drop table rows * You can set up various options to control how the system will work * Copyright (c) Denis Howlett <denish@isocra.com> * Licensed like jQuery, see http://docs.jquery.com/License. * * Configuration options: * * onDragStyle * This is the style that is assigned to the row during drag. There are limitations to the styles that can be * associated with a row (such as you can't assign a border--well you can, but it won't be * displayed). (So instead consider using onDragClass.) The CSS style to apply is specified as * a map (as used in the jQuery css(...) function). * onDropStyle * This is the style that is assigned to the row when it is dropped. As for onDragStyle, there are limitations * to what you can do. Also this replaces the original style, so again consider using onDragClass which * is simply added and then removed on drop. * onDragClass * This class is added for the duration of the drag and then removed when the row is dropped. It is more * flexible than using onDragStyle since it can be inherited by the row cells and other content. The default * is class is tDnD_whileDrag. So to use the default, simply customise this CSS class in your * stylesheet. * onDrop * Pass a function that will be called when the row is dropped. The function takes 2 parameters: the table * and the row that was dropped. You can work out the new order of the rows by using * table.rows. * onDragStart * Pass a function that will be called when the user starts dragging. 
The function takes 2 parameters: the * table and the row which the user has started to drag. * onAllowDrop * Pass a function that will be called as a row is over another row. If the function returns true, allow * dropping on that row, otherwise not. The function takes 2 parameters: the dragged row and the row under * the cursor. It returns a boolean: true allows the drop, false doesn't allow it. * scrollAmount * This is the number of pixels to scroll if the user moves the mouse cursor to the top or bottom of the * window. The page should automatically scroll up or down as appropriate (tested in IE6, IE7, Safari, FF2, * FF3 beta * dragHandle * This is the name of a class that you assign to one or more cells in each row that is draggable. If you * specify this class, then you are responsible for setting cursor: move in the CSS and only these cells * will have the drag behaviour. If you do not specify a dragHandle, then you get the old behaviour where * the whole row is draggable. * * Other ways to control behaviour: * * Add class="nodrop" to any rows for which you don't want to allow dropping, and class="nodrag" to any rows * that you don't want to be draggable. * * Inside the onDrop method you can also call $.tableDnD.serialize() this returns a string of the form * <tableID>[]=<rowID1>&<tableID>[]=<rowID2> so that you can send this back to the server. The table must have * an ID as must all the rows. * * Other methods: * * $("...").tableDnDUpdate() * Will update all the matching tables, that is it will reapply the mousedown method to the rows (or handle cells). * This is useful if you have updated the table rows using Ajax and you want to make the table draggable again. * The table maintains the original configuration (so you don't have to specify it again). 
* * $("...").tableDnDSerialize() * Will serialize and return the serialized string as above, but for each of the matching tables--so it can be * called from anywhere and isn't dependent on the currentTable being set up correctly before calling * * Known problems: * - Auto-scoll has some problems with IE7 (it scrolls even when it shouldn't), work-around: set scrollAmount to 0 * * Version 0.2: 2008-02-20 First public version * Version 0.3: 2008-02-07 Added onDragStart option * Made the scroll amount configurable (default is 5 as before) * Version 0.4: 2008-03-15 Changed the noDrag/noDrop attributes to nodrag/nodrop classes * Added onAllowDrop to control dropping * Fixed a bug which meant that you couldn't set the scroll amount in both directions * Added serialize method * Version 0.5: 2008-05-16 Changed so that if you specify a dragHandle class it doesn't make the whole row * draggable * Improved the serialize method to use a default (and settable) regular expression. * Added tableDnDupate() and tableDnDSerialize() to be called when you are outside the table */ jQuery.tableDnD = { /** Keep hold of the current table being dragged */ currentTable : null, /** Keep hold of the current drag object if any */ dragObject: null, /** The current mouse offset */ mouseOffset: null, /** Remember the old value of Y so that we don't do too much processing */ oldY: 0, /** Actually build the structure */ build: function(options) { // Set up the defaults if any this.each(function() { // This is bound to each matching table, set up the defaults and override with user options this.tableDnDConfig = jQuery.extend({ onDragStyle: null, onDropStyle: null, // Add in the default class for whileDragging onDragClass: "tDnD_whileDrag", onDrop: null, onDragStart: null, scrollAmount: 5, serializeRegexp: /[^\-]*$/, // The regular expression to use to trim row IDs serializeParamName: null, // If you want to specify another parameter name instead of the table ID dragHandle: null // If you give the 
name of a class here, then only Cells with this class will be draggable }, options || {}); // Now make the rows draggable jQuery.tableDnD.makeDraggable(this); }); // Now we need to capture the mouse up and mouse move event // We can use bind so that we don't interfere with other event handlers jQuery(document) .bind('mousemove', jQuery.tableDnD.mousemove) .bind('mouseup', jQuery.tableDnD.mouseup); // Don't break the chain return this; }, /** This function makes all the rows on the table draggable apart from those marked as "NoDrag" */ makeDraggable: function(table) { var config = table.tableDnDConfig; if (table.tableDnDConfig.dragHandle) { // We only need to add the event to the specified cells var cells = jQuery("td."+table.tableDnDConfig.dragHandle, table); cells.each(function() { // The cell is bound to "this" jQuery(this).mousedown(function(ev) { jQuery.tableDnD.dragObject = this.parentNode; jQuery.tableDnD.currentTable = table; jQuery.tableDnD.mouseOffset = jQuery.tableDnD.getMouseOffset(this, ev); if (config.onDragStart) { // Call the onDrop method if there is one config.onDragStart(table, this); } return false; }); }) } else { // For backwards compatibility, we add the event to the whole row var rows = jQuery("tr", table); // get all the rows as a wrapped set rows.each(function() { // Iterate through each row, the row is bound to "this" var row = jQuery(this); if (! 
row.hasClass("nodrag")) { row.mousedown(function(ev) { if (ev.target.tagName == "TD") { jQuery.tableDnD.dragObject = this; jQuery.tableDnD.currentTable = table; jQuery.tableDnD.mouseOffset = jQuery.tableDnD.getMouseOffset(this, ev); if (config.onDragStart) { // Call the onDrop method if there is one config.onDragStart(table, this); } return false; } }).css("cursor", "move"); // Store the tableDnD object } }); } }, updateTables: function() { this.each(function() { // this is now bound to each matching table if (this.tableDnDConfig) { jQuery.tableDnD.makeDraggable(this); } }) }, /** Get the mouse coordinates from the event (allowing for browser differences) */ mouseCoords: function(ev){ if(ev.pageX || ev.pageY){ return {x:ev.pageX, y:ev.pageY}; } return { x:ev.clientX + document.body.scrollLeft - document.body.clientLeft, y:ev.clientY + document.body.scrollTop - document.body.clientTop }; }, /** Given a target element and a mouse event, get the mouse offset from that element. To do this we need the element's position and the mouse position */ getMouseOffset: function(target, ev) { ev = ev || window.event; var docPos = this.getPosition(target); var mousePos = this.mouseCoords(ev); return {x:mousePos.x - docPos.x, y:mousePos.y - docPos.y}; }, /** Get the position of an element by going up the DOM tree and adding up all the offsets */ getPosition: function(e){ var left = 0; var top = 0; /** Safari fix -- thanks to Luis Chato for this! */ if (e.offsetHeight == 0) { /** Safari 2 doesn't correctly grab the offsetTop of a table row this is detailed here: http://jacob.peargrove.com/blog/2006/technical/table-row-offsettop-bug-in-safari/ the solution is likewise noted there, grab the offset of a table cell in the row - the firstChild. 
note that firefox will return a text node as a first child, so designing a more thorough solution may need to take that into account, for now this seems to work in firefox, safari, ie */ e = e.firstChild; // a table cell } if (e && e.offsetParent) { while (e.offsetParent){ left += e.offsetLeft; top += e.offsetTop; e = e.offsetParent; } left += e.offsetLeft; top += e.offsetTop; } return {x:left, y:top}; }, mousemove: function(ev) { if (jQuery.tableDnD.dragObject == null) { return; } var dragObj = jQuery(jQuery.tableDnD.dragObject); var config = jQuery.tableDnD.currentTable.tableDnDConfig; var mousePos = jQuery.tableDnD.mouseCoords(ev); var y = mousePos.y - jQuery.tableDnD.mouseOffset.y; //auto scroll the window var yOffset = window.pageYOffset; if (document.all) { // Windows version //yOffset=document.body.scrollTop; if (typeof document.compatMode != 'undefined' && document.compatMode != 'BackCompat') { yOffset = document.documentElement.scrollTop; } else if (typeof document.body != 'undefined') { yOffset=document.body.scrollTop; } } if (mousePos.y-yOffset < config.scrollAmount) { window.scrollBy(0, -config.scrollAmount); } else { var windowHeight = window.innerHeight ? window.innerHeight : document.documentElement.clientHeight ? document.documentElement.clientHeight : document.body.clientHeight; if (windowHeight-(mousePos.y-yOffset) < config.scrollAmount) { window.scrollBy(0, config.scrollAmount); } } if (y != jQuery.tableDnD.oldY) { // work out if we're going up or down... 
var movingDown = y > jQuery.tableDnD.oldY; // update the old value jQuery.tableDnD.oldY = y; // update the style to show we're dragging if (config.onDragClass) { dragObj.addClass(config.onDragClass); } else { dragObj.css(config.onDragStyle); } // If we're over a row then move the dragged row to there so that the user sees the // effect dynamically var currentRow = jQuery.tableDnD.findDropTargetRow(dragObj, y); if (currentRow) { // TODO worry about what happens when there are multiple TBODIES if (movingDown && jQuery.tableDnD.dragObject != currentRow) { jQuery.tableDnD.dragObject.parentNode.insertBefore(jQuery.tableDnD.dragObject, currentRow.nextSibling); } else if (! movingDown && jQuery.tableDnD.dragObject != currentRow) { jQuery.tableDnD.dragObject.parentNode.insertBefore(jQuery.tableDnD.dragObject, currentRow); } } } return false; }, /** We're only worried about the y position really, because we can only move rows up and down */ findDropTargetRow: function(draggedRow, y) { var rows = jQuery.tableDnD.currentTable.rows; for (var i=0; i<rows.length; i++) { var row = rows[i]; var rowY = this.getPosition(row).y; var rowHeight = parseInt(row.offsetHeight)/2; if (row.offsetHeight == 0) { rowY = this.getPosition(row.firstChild).y; rowHeight = parseInt(row.firstChild.offsetHeight)/2; } // Because we always have to insert before, we need to offset the height a bit if ((y > rowY - rowHeight) && (y < (rowY + rowHeight))) { // that's the row we're over // If it's the same as the current row, ignore it if (row == draggedRow) {return null;} var config = jQuery.tableDnD.currentTable.tableDnDConfig; if (config.onAllowDrop) { if (config.onAllowDrop(draggedRow, row)) { return row; } else { return null; } } else { // If a row has nodrop class, then don't allow dropping (inspired by John Tarr and Famic) var nodrop = jQuery(row).hasClass("nodrop"); if (! 
nodrop) { return row; } else { return null; } } return row; } } return null; }, mouseup: function(e) { if (jQuery.tableDnD.currentTable && jQuery.tableDnD.dragObject) { var droppedRow = jQuery.tableDnD.dragObject; var config = jQuery.tableDnD.currentTable.tableDnDConfig; // If we have a dragObject, then we need to release it, // The row will already have been moved to the right place so we just reset stuff if (config.onDragClass) { jQuery(droppedRow).removeClass(config.onDragClass); } else { jQuery(droppedRow).css(config.onDropStyle); } jQuery.tableDnD.dragObject = null; if (config.onDrop) { // Call the onDrop method if there is one config.onDrop(jQuery.tableDnD.currentTable, droppedRow); } jQuery.tableDnD.currentTable = null; // let go of the table too } }, serialize: function() { if (jQuery.tableDnD.currentTable) { return jQuery.tableDnD.serializeTable(jQuery.tableDnD.currentTable); } else { return "Error: No Table id set, you need to set an id on your table and every row"; } }, serializeTable: function(table) { var result = ""; var tableId = table.id; var rows = table.rows; for (var i=0; i<rows.length; i++) { if (result.length > 0) result += "&"; var rowId = rows[i].id; if (rowId && rowId && table.tableDnDConfig && table.tableDnDConfig.serializeRegexp) { rowId = rowId.match(table.tableDnDConfig.serializeRegexp)[0]; } result += tableId + '[]=' + rowId; } return result; }, serializeTables: function() { var result = ""; this.each(function() { // this is now bound to each matching table result += jQuery.tableDnD.serializeTable(this); }); return result; }, destroy:function(){ jQuery(document) .unbind('mousemove', jQuery.tableDnD.mousemove) .unbind('mouseup', jQuery.tableDnD.mouseup); } } jQuery.fn.extend( { tableDnD : jQuery.tableDnD.build, tableDnDUpdate : jQuery.tableDnD.updateTables, tableDnDSerialize: jQuery.tableDnD.serializeTables, unTableDnD : jQuery.tableDnD.destroy } ); grid.postext.js000064400000003011151677265110007542 0ustar00;(function($){ /** * jqGrid 
extension * Paul Tiseo ptiseo@wasteconsultants.com * * Dual licensed under the MIT and GPL licenses: * http://www.opensource.org/licenses/mit-license.php * http://www.gnu.org/licenses/gpl-2.0.html **/ $.jgrid.extend({ getPostData : function(){ var $t = this[0]; if(!$t.grid) { return; } return $t.p.postData; }, setPostData : function( newdata ) { var $t = this[0]; if(!$t.grid) { return; } // check if newdata is correct type if ( typeof(newdata) === 'object' ) { $t.p.postData = newdata; } else { alert("Error: cannot add a non-object postData value. postData unchanged."); } }, appendPostData : function( newdata ) { var $t = this[0]; if(!$t.grid) { return; } // check if newdata is correct type if ( typeof(newdata) === 'object' ) { $.extend($t.p.postData, newdata); } else { alert("Error: cannot append a non-object postData value. postData unchanged."); } }, setPostDataItem : function( key, val ) { var $t = this[0]; if(!$t.grid) { return; } $t.p.postData[key] = val; }, getPostDataItem : function( key ) { var $t = this[0]; if(!$t.grid) { return; } return $t.p.postData[key]; }, removePostDataItem : function( key ) { var $t = this[0]; if(!$t.grid) { return; } delete $t.p.postData[key]; }, getUserData : function(){ var $t = this[0]; if(!$t.grid) { return; } return $t.p.userData; }, getUserDataItem : function( key ) { var $t = this[0]; if(!$t.grid) { return; } return $t.p.userData[key]; } }); })(jQuery);grid.tbltogrid.js000064400000006071151677265110010037 0ustar00/* Transform a table to a jqGrid. Peter Romianowski <peter.romianowski@optivo.de> If the first column of the table contains checkboxes or radiobuttons then the jqGrid is made selectable. 
*/ // Addition - selector can be a class or id function tableToGrid(selector, options) { jQuery(selector).each(function() { if(this.grid) {return;} //Adedd from Tony Tomov // This is a small "hack" to make the width of the jqGrid 100% jQuery(this).width("99%"); var w = jQuery(this).width(); // Text whether we have single or multi select var inputCheckbox = jQuery('tr td:first-child input[type=checkbox]:first', jQuery(this)); var inputRadio = jQuery('tr td:first-child input[type=radio]:first', jQuery(this)); var selectMultiple = inputCheckbox.length > 0; var selectSingle = !selectMultiple && inputRadio.length > 0; var selectable = selectMultiple || selectSingle; //var inputName = inputCheckbox.attr("name") || inputRadio.attr("name"); // Build up the columnModel and the data var colModel = []; var colNames = []; jQuery('th', jQuery(this)).each(function() { if (colModel.length === 0 && selectable) { colModel.push({ name: '__selection__', index: '__selection__', width: 0, hidden: true }); colNames.push('__selection__'); } else { colModel.push({ name: jQuery(this).attr("id") || jQuery.trim(jQuery.jgrid.stripHtml(jQuery(this).html())).split(' ').join('_'), index: jQuery(this).attr("id") || jQuery.trim(jQuery.jgrid.stripHtml(jQuery(this).html())).split(' ').join('_'), width: jQuery(this).width() || 150 }); colNames.push(jQuery(this).html()); } }); var data = []; var rowIds = []; var rowChecked = []; jQuery('tbody > tr', jQuery(this)).each(function() { var row = {}; var rowPos = 0; jQuery('td', jQuery(this)).each(function() { if (rowPos === 0 && selectable) { var input = jQuery('input', jQuery(this)); var rowId = input.attr("value"); rowIds.push(rowId || data.length); if (input.is(":checked")) { rowChecked.push(rowId); } row[colModel[rowPos].name] = input.attr("value"); } else { row[colModel[rowPos].name] = jQuery(this).html(); } rowPos++; }); if(rowPos >0) { data.push(row); } }); // Clear the original HTML table jQuery(this).empty(); // Mark it as jqGrid 
jQuery(this).addClass("scroll"); jQuery(this).jqGrid(jQuery.extend({ datatype: "local", width: w, colNames: colNames, colModel: colModel, multiselect: selectMultiple //inputName: inputName, //inputValueCol: imputName != null ? "__selection__" : null }, options || {})); // Add data var a; for (a = 0; a < data.length; a++) { var id = null; if (rowIds.length > 0) { id = rowIds[a]; if (id && id.replace) { // We have to do this since the value of a checkbox // or radio button can be anything id = encodeURIComponent(id).replace(/[.\-%]/g, "_"); } } if (id === null) { id = a + 1; } jQuery(this).jqGrid("addRowData",id, data[a]); } // Set the selection for (a = 0; a < rowChecked.length; a++) { jQuery(this).jqGrid("setSelection",rowChecked[a]); } }); }; nagios.py000064400000013115151700142040006371 0ustar00import agent_util import os import sys DEFAULT_NAGIOS_FOLDER = "/usr/share/fm-agent/nagios" def parse_performance_data(output, log): """ Parse Nagios performance data, as defined in their API docs at https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/pluginapi.html Returns a list of dictionaries, each with the following keys (if found in output): - label - value - unit - min_value - max_value """ lines = output.strip().split("\n") # Grab the first line, after the | which is performance data parts = [] if "|" in lines[0]: parts.extend(lines[0].split("|", 1)[1].split(" ")) # Look in rest of output for a | indicating that all the rest is performance data found_perf = False for line in lines[1:]: if not found_perf and "|" in line: found_perf = True line = line.split("|", 1)[1] if found_perf: parts.extend(line.split(" ")) metrics = [] # Parse each part into component pieces for part in parts: metric = {} try: pieces = part.strip().strip(";").split(";") label, value = pieces[0].split("=") metric["label"] = label.strip("'").strip() if value == "U": value = None else: # Split the value from the unit, if it's there unit = "" for i, char in enumerate(value): if char 
not in "-0123456789.": unit = value[i:] value = value[:i] break value = float(value) metric["value"] = value metric["unit"] = unit # Extract min and max, if present if len(pieces) >= 4: metric["min_value"] = float(pieces[3]) if len(pieces) >= 5: metric["max_value"] = float(pieces[4]) metrics.append(metric) except: log.exception("Error parsing Nagios output: %s" % part) return metrics class NagiosPlugin(agent_util.Plugin): textkey = "nagios" label = "Nagios" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None custom_folder = config.get("plugins_location") plugins = self._get_all_plugins(DEFAULT_NAGIOS_FOLDER, custom_folder) if not plugins: status = agent_util.UNSUPPORTED msg = "No nagios plugin found on %s %s" % ( DEFAULT_NAGIOS_FOLDER, custom_folder or "", ) return {} metric_options = [] for dir, plugin in plugins: self.log.info("Found Nagios plugin: %s" % plugin) # Add an option for the return status metric_options.append( { "nagios_script": plugin, "metric": "status", "resource": "%s: status" % (plugin), } ) try: ret_code, output = agent_util.execute_command(os.path.join(dir, plugin)) self.log.info("Nagios: %s %s" % (ret_code, output)) for metric in parse_performance_data(output, self.log): metric_options.append( { "nagios_script": plugin, "metric": metric["label"], "resource": "%s: %s" % (plugin, metric["label"]), } ) except: self.log.exception( "Error gathering metadata for Nagios plugin %s" % plugin ) self.log.info( "%s Nagios options found: %s" % (len(metric_options), metric_options) ) options_schema = { "nagios_script": "string", "metric": "string", "resource": "string", } metadata = { "nagios_metric": { "label": "Nagios Metric", "options": metric_options, "options_schema": options_schema, "status": status, "error_message": msg, } } return metadata @staticmethod def _get_all_plugins(*args): plugins = [] for arg in args: if arg and os.path.isdir(arg): for file in os.listdir(arg): if os.access(os.path.join(arg, file), 
os.X_OK): plugins.append((arg, file)) return plugins def check(self, textkey, plugin_metric, config): split = plugin_metric.split(":") plugin_name = split[0].strip() metric_name = split[1].strip() custom_folder = config.get("plugins_location") plugins = self._get_all_plugins(DEFAULT_NAGIOS_FOLDER, custom_folder) for dir, plugin in plugins: if plugin == plugin_name: self.log.debug( "Executing %s to get %s" % (os.path.join(dir, plugin), textkey) ) ret_code, output = agent_util.execute_command(os.path.join(dir, plugin)) self.log.debug("Nagios: %s %s" % (ret_code, output)) if metric_name == "status": return ret_code for metric in parse_performance_data(output, self.log): if metric["label"] == metric_name: return metric["value"] self.log.info("No matching Nagios plugin found for %s" % textkey) return None nginx.py000064400000030521151700142040006234 0ustar00import re import agent_util import logging import glob from library.log_matcher import LogMatcher try: # Python3 from urllib.request import urlopen except ImportError: # Python2 from urllib2 import urlopen logger = logging.getLogger(__name__) def execute_query(query): ret, output = agent_util.execute_command(query) return str(output) LOG_COUNT_EXPRESSIONS = {"4xx": r"4\d{2}", "5xx": r"5\d{2}", "2xx": r"2\d{2}"} DEFAULT_NGINX_LOG = "/var/log/nginx/access.log" class NginxPlugin(agent_util.Plugin): textkey = "nginx" label = "Nginx" DEFAULTS = {"console_url": "http://localhost"} @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if nginx is even installed or running installed = agent_util.which("nginx") if not installed and not config.get("from_docker"): self.log.info("nginx binary not found") status = agent_util.UNSUPPORTED return {} if "console_url" not in config: config.update(self.DEFAULTS) if status == agent_util.SUPPORTED and not config.get("from_docker"): query = "%s/nginx_status" % config["console_url"] nginxStatus = ( urlopen("%s/nginx_status" % 
config["console_url"]).read().decode() ) if config.get("debug", False): self.log.debug("Nginx command '%s' output:" % query) self.log.debug(str(nginxStatus)) if not nginxStatus: status = agent_util.MISCONFIGURED msg = "The nginx_status path is not configured." data = { "active_connections": { "label": "Number of open connections", "options": None, "status": status, "error_message": msg, "unit": "connections", }, "accepted_connections": { "label": "Number of accepted connections per second", "options": None, "status": status, "error_message": msg, "unit": "connections/s", }, "dropped_connections": { "label": "Number of dropped connections per second", "options": None, "status": status, "error_message": msg, "unit": "connections/s", }, "handled_connections": { "label": "Number of handled connections per second", "options": None, "status": status, "error_message": msg, "unit": "connections/s", }, "requests_per_second": { "label": "Average requests per second", "options": None, "status": status, "error_message": msg, "unit": "requests/s", }, "requests_per_connection": { "label": "Number of requests per connection", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "nginx_reading": { "label": "Read request header", "options": None, "status": status, "error_message": msg, "unit": "requests/s", }, "nginx_writing": { "label": "Read request body", "options": None, "status": status, "error_message": msg, "unit": "requests/s", }, "nginx_waiting": { "label": "Keep alive connections", "options": None, "status": status, "error_message": msg, "unit": "connections/s", }, "4xx": { "label": "Rate of 4xx's events", "options": None, "status": status, "error_message": msg, "unit": "entries/s", }, "2xx": { "label": "Rate of 2xx's events", "options": None, "status": status, "error_message": msg, "unit": "entries/s", }, "5xx": { "label": "Rate of 5xx's events", "options": None, "status": status, "error_message": msg, "unit": "entries/s", }, } return data 
@classmethod def get_metadata_docker(self, container, config): if "console_url" not in config: try: ip = agent_util.get_container_ip(container) config["console_url"] = "http://%s" % ip except Exception: import sys _, e, _ = sys.exc_info() self.log.exception(e) config["from_docker"] = True return self.get_metadata(config) def _calculate_delta(self, textkey, value, is_rate=True): """ Extract the previous cached value, calculate the delta, and store the current one. """ cached = self.get_cache_results("nginx:%s" % textkey, None) if not cached: self.log.info("Empty nginx cache! Building for first time") self.cache_result("nginx:%s" % textkey, None, value, replace=True) return None delta, previous_value = cached[0] self.cache_result("nginx:%s" % textkey, None, value, replace=True) if previous_value > value: return None if is_rate: return (value - previous_value) / float(delta) else: return value - previous_value def check(self, textkey, data, config): if not config.get("console_url"): config.update(self.DEFAULTS) result = urlopen("%s/nginx_status" % config["console_url"]).read().decode() statLines = result.split("\n") p = re.compile(r"(\d+)") connections = p.findall(statLines[2]) connectionsByStatus = p.findall(statLines[3]) result = 0 status_map = { "nginx_reading": int(connectionsByStatus[0]), "nginx_writing": int(connectionsByStatus[1]), "nginx_waiting": int(connectionsByStatus[2]), } if textkey == "active_connections": active_connections = p.findall(statLines[0]) result = int(active_connections[0]) elif textkey == "requests_per_connection": active_connections = p.findall(statLines[0]) active_connections = int(active_connections[0]) requests = int(connections[2]) requests_textkey = "%s:%s" % (textkey, "requests") requests_diff = self._calculate_delta( requests_textkey, requests, is_rate=False ) if active_connections and requests_diff: return requests_diff / active_connections else: return None # All these values use the delta calculation method elif textkey in ( 
"nginx_reading", "nginx_writing", "nginx_waiting", "requests_per_second", "accepted_connections", "handled_connections", "handles_request", "dropped_connections", ): # The only difference is in how they get the current value if textkey in ("nginx_reading", "nginx_writing", "nginx_waiting"): current_res = status_map[textkey] elif textkey == "accepted_connections": current_res = int(connections[0]) elif textkey == "handled_connections": current_res = int(connections[1]) elif textkey in ("requests_per_second"): current_res = int(connections[2]) elif textkey in ("dropped_connections"): current_res = int(connections[0]) - int(connections[1]) return self._calculate_delta(textkey, current_res) # Handle the log count metrics elif textkey in ("4xx", "5xx", "2xx"): log_files = [DEFAULT_NGINX_LOG] for key, value in config.items(): if key not in ["debug", "console_url"]: value = value.strip('"').strip("'") if "*" in value: log_files += glob.glob(value) else: log_files += [value] file_inodes = {} total_metrics = 0 timescale = 1 column = 8 expression = LOG_COUNT_EXPRESSIONS.get(textkey) for target in log_files: # Extract the file current inode try: file_inodes[target] = LogMatcher.get_file_inode(target) except OSError: import sys _, error, _ = sys.exc_info() logging.error("Error opening %s file." % (target)) logging.error(error) continue # Extract data from the agent cache about the check log_data = self.get_cache_results( textkey, "%s/%s" % (self.schedule.id, target) ) if log_data: log_data = log_data[0][-1] else: log_data = dict() last_line_number = log_data.get("last_known_line") stored_inode = log_data.get("inode") results = log_data.get("results", []) # Extract the lines of the file. try: total_lines, current_lines = LogMatcher.get_file_lines( last_line_number, target, file_inodes[target], stored_inode ) except IOError: import sys _, e, _ = sys.exc_info() logging.error( "Unable to read log file: %s. 
Make sure fm-agent user belongs to group adm" % str(e) ) continue logging.info( "Stored line %s Current line %s Looking at %s lines" % (str(last_line_number), str(total_lines), str(len(current_lines))) ) # Perform the matching of the expression in the lines log_matcher = LogMatcher(stored_inode) results = log_matcher.match_in_column(current_lines, expression, column) metric, results = log_matcher.calculate_metric(results, timescale) total_metrics += metric and metric or 0 logging.info( 'Found %s instances of "%s" in %s' % (str(metric or 0), expression, target) ) previous_result = self.get_cache_results( textkey, "%s/%s" % (self.schedule.id, target) ) cache_data = dict( inode=file_inodes[target], last_known_line=total_lines, results=results, ) self.cache_result( textkey, "%s/%s" % (self.schedule.id, target), cache_data, replace=True, ) if not previous_result: result = None else: delta, prev_data = previous_result[0] try: prev_count = prev_data.get("results")[0][-1] curr_count = cache_data.get("results")[0][-1] result = curr_count / float(delta) except IndexError: result = None return result def check_docker(self, container, textkey, data, config): if "console_url" not in config: try: ip = agent_util.get_container_ip(container) config["console_url"] = "http://%s" % ip except Exception: import sys _, e, _ = sys.exc_info() self.log.exception(e) config["from_docker"] = True return self.check(textkey, data, config) nodejs.py000064400000014703151700142040006377 0ustar00import agent_util import re try: import json except: import simplejson as json from agent_util import float from os.path import isfile class NodeJSPlugin(agent_util.Plugin): """ NodeJS checking plugin for the fm agent. 
""" textkey = "nodejs" label = "NodeJS" _node = agent_util.which("nodejs") and "nodejs" or "node" @classmethod def get_metadata(self, config): if NodeJSPlugin._node_executable(config): status = agent_util.SUPPORTED else: status = agent_util.UNSUPPORTED if status == agent_util.UNSUPPORTED: msg = "Error finding a valid nodejs application." return {} else: msg = None found_keys = ["resident_set_size", "heap_total", "heap_used"] metadata = {} for key in found_keys: unit = "MB" metadata[key] = { "label": key.replace("_", " ").capitalize(), "options": None, "status": status, "msg": msg, "unit": unit, } metadata["cluster_workers"] = { "label": "Cluster workers", "options": None, "status": status, "msg": msg, "unit": "worker count", } metadata["high_resolution_time"] = { "label": "High resolution time", "options": None, "status": status, "msg": msg, "unit": "seconds", } return metadata @staticmethod def _node_executable(config): """ Run a simple command to make sure that the nodejs executable is available to the system. """ custom_path = config.get("node_binary_location", None) if custom_path: return isfile(custom_path) and agent_util.which(custom_path) return agent_util.which(NodeJSPlugin._node) def _retrieve_heap_data(self): """ Make a pass of retrieving the heap size of the V8. """ heap_stats = "require('v8').getHeapStatistics()" return self._eval_node(heap_stats) def _eval_node(self, instruction): """ Evaluate the passed instruction in node. All instructions are included with a console log statement to retrieve the passed information. """ node_executable = NodeJSPlugin._node_executable(self.config) eval_command = """ %s -p "%s" """ % (node_executable, instruction) result = agent_util.execute_command(eval_command) if result[0] == 0: return result[1] else: return 0 def _retrieve_entry_from_data(self, data, value): """ Retrieve a single value of the heap data returned by nodejs. 
""" expr = r"%s: (\d+)" % (str(value)) result = re.findall(expr, data) if result: return float(result[0]) def _retrieve_workers_data(self): """ Retrieve the result of gettings the workers. """ instruction = "require('cluster').workers" worker_obj = self._eval_node(instruction) if not worker_obj: self.log.error( "node returned unexpected output. Is the binary location correct?" ) return None expr = r"(\S+):" result = re.findall(expr, worker_obj) return len(result) def _retrieve_high_resolution_time(self): """ Retrieve the high resolution time in seconds. """ instruction = "process.hrtime()" result = self._eval_node(instruction) if not result: self.log.error( "node returned unexpected output. Is the binary location correct?" ) return None expr = r"\[ (\d+)," seconds = re.findall(expr, result) if seconds: return float(seconds[0]) else: return None def _retrieve_memory_data(self): """ Retrieve the memory data of NodeJS process. """ instruction = "process.memoryUsage()" result = self._eval_node(instruction) return result def _retrieve_resident_set_size(self): """ Extract the resident set size from the process memoryUsage. """ data = self._retrieve_memory_data() if not data: self.log.error( "node returned unexpected output. Is the binary location correct?" ) return None value = self._find_value("rss", data) value = value and self._convert_bytes_to_mb(value) or None return value def _retrieve_heap_total(self): """ Return the heap total. """ data = self._retrieve_memory_data() if not data: self.log.error( "node returned unexpected output. Is the binary location correct?" ) return None value = self._find_value("heapTotal", data) value = value and self._convert_bytes_to_mb(value) or None return value def _retrieve_heap_used(self): """ Return the heap used. """ data = self._retrieve_memory_data() if not data: self.log.error( "node returned unexpected output. Is the binary location correct?" 
) return None value = self._find_value("heapUsed", data) value = value and self._convert_bytes_to_mb(value) or None return value def _convert_bytes_to_mb(self, value): """ Peform a quick conversion to mb. """ return float(value) / (2**20) def _find_value(self, target, data): """ Find the target in the passed data string as a javascript object. """ expr = r"%s: (\d+)" % (str(target)) result = re.findall(expr, data) return result and result[0] or None def check(self, textkey, data, config): self.config = config if textkey == "cluster_workers": return self._retrieve_workers_data() elif textkey == "high_resolution_time": return self._retrieve_high_resolution_time() elif textkey == "resident_set_size": return self._retrieve_resident_set_size() elif textkey == "heap_total": return self._retrieve_heap_total() else: return self._retrieve_heap_used() ntp.py000064400000003757151700142040005725 0ustar00import agent_util import time from datetime import datetime import socket import struct def getNTPTime(host="pool.ntp.org", port=123): buf = 1024 address = (host, port) msg = "\x1b" + 47 * "\0" TIME1970 = 2208988800 # 1970-01-01 00:00:00 # connect to server try: client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) client.settimeout(15) client.sendto(msg.encode(), address) msg, address = client.recvfrom(buf) t = struct.unpack("!12I", msg)[10] t -= TIME1970 ## t = time.gmtime(t) ## t = datetime(year=t.tm_year, month=t.tm_mon, day=t.tm_mday, hour=t.tm_hour, minute=t.tm_min, second=t.tm_sec) return t except: return None class NTPPlugin(agent_util.Plugin): textkey = "ntp" label = "NTP" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None metadata = { "ntp_diff": { "label": "Difference between machine and NTP time", "options": None, "status": status, "error_message": msg, "unit": "second", } } return metadata def check(self, textkey, data, config={}): if textkey == "ntp_diff": host = config.get("ntp_host", "pool.ntp.org") port = 
int(config.get("ntp_port", 123)) ntp_time = getNTPTime(host, port) local_time = time.mktime(datetime.now().timetuple()) if not ntp_time: self.log.critical("Unable to get NTP time from %s:%s" % (host, port)) return else: self.log.info("NTP: %s, local: %s" % (ntp_time, local_time)) time_diff = abs(local_time - ntp_time) if time_diff < 86400 * 365: # Return the value if it's less than a year out of sync return time_diff else: return None return 0 opcache.py000064400000004465151700142040006523 0ustar00import agent_util import logging from agent_util import float try: # Python 2.x from urllib2 import urlopen except ImportError: from urllib.request import urlopen logger = logging.getLogger(__name__) class OpCachePlugin(agent_util.Plugin): textkey = "opcache" label = "PHP OpCache" @classmethod def update_metrics(self, config): # Make initial call to opcache.py using url set in config metric = {} r = urlopen(config["opcache_url"]) reply = r.read() reply = reply.strip().strip(";").split(";") for item in reply: if "time" in item: continue # So we can strip out the duplicate entries from the metadata like the duplicate free/used_memory stats # did this because the first metric it returns is the actual correct one, the second is the default elif item.split(":")[0] in metric.keys(): continue elif "Array" in item: continue else: metric_name = item.split(":")[0] metric_value = item.split(":")[-1] metric[metric_name] = float(metric_value) return metric @classmethod def get_metadata(self, config): status = agent_util.MISCONFIGURED msg = "Missing/incorrect data in [opcache] block!" 
if "opcache_url" in config: url = config["opcache_url"] status = agent_util.SUPPORTED msg = None else: return {} metadata = {} metrics = self.update_metrics(config) options = [] for textkey in metrics: if textkey == "Array": continue metadata[textkey] = { "label": textkey.replace("_", " ").title(), "options": options, "status": status, "error_message": msg, } return metadata def check(self, textkey, data, config): tmp = self.update_metrics(config) if textkey in tmp: # Adjusting for MB since it outputs in bytes if "memory" in textkey or "buffer" in textkey: return float(tmp[str(textkey)]) / 1048576 else: return tmp[str(textkey)] else: return None oracle.py000064400000037145151700142040006367 0ustar00""" FortiMonitor Monitoring Agent Oracle Database Plugin Copyright 2023 Fortinet, Inc. All Rights Reserved. fm-ops@fortinet.com To Configure: The following configuration options need to be set under the [oracle] block in the agent configuration file: - oracle_home - ORACLE_HOME value that points to the local database installation - oracle_sid - name of the database instance to be used for connections - username - username for user that the agent should use when connecting to the database - password - password for user that the agent should use when connecting to the database - tns_listener_ip (optional) - IP address that the TNS listenener service is running on The user that will be used by the agent needs the following permission grants to fully operate: GRANT CREATE SESSION TO <USER> ; GRANT SELECT ON dba_data_files TO <USER> ; GRANT SELECT ON dba_segments TO <USER> ; GRANT SELECT ON dba_free_space, v$resource_limit to <USER> ; GRANT SELECT ON v_$resource_limit to <USER> ; """ import agent_util import os import os.path import tempfile from agent_util import float import logging logger = logging.getLogger(__name__) tablespace_query = """select b.tablespace_name, tbs_size SizeMb, a.free_space FreeMb from (select tablespace_name, round(sum(bytes)/1024/1024 ,2) as 
free_space from dba_free_space group by tablespace_name) a, (select tablespace_name, sum(bytes)/1024/1024 as tbs_size from dba_data_files group by tablespace_name) b where a.tablespace_name(+)=b.tablespace_name; """ resource_query = """select resource_name, current_utilization, max_utilization from v$resource_limit;""" def execute_query(sid, config, query, tnsname=None): "Run an Oracle query via sqlplus and parse the results" # Generate a temporary file for the query script (file, filename) = tempfile.mkstemp() orig_filename = filename filename += ".sql" f = open(filename, "w") f.write("set pages 1000;\nset linesize 1000;\n %s\nquit;" % query) f.close() # Build the SQL*PLUS command and call it global_vars = "LD_LIBRARY_PATH=%s/lib ORACLE_HOME=%s ORACLE_SID=%s " % ( config["oracle_home"], config["oracle_home"], sid, ) command = os.path.join(config.get("oracle_home"), "bin", "sqlplus") command_format = " -S %s/" command_arguments = (config.get("username"), config.get("password"), filename) command_format += '\\"%s\\"' if tnsname: command_format += "@%s" command_arguments = ( config.get("username"), config.get("password"), tnsname, filename, ) elif config.get("rds_end_point"): command_format += "@//%s:1521/orcl" command_arguments = ( config.get("username"), config.get("password"), config.get("rds_end_point"), filename, ) command_format += " @%s" cmd = global_vars + command + command_format % command_arguments status, output = agent_util.execute_command(cmd, timeout=10) # Remove our temporary file os.remove(filename) os.remove(orig_filename) # Parse the output results = [] lines = output.strip().split("\n") columns = lines[0].lower().split() for line in lines[2:]: line = line.strip() if not line: continue if line.endswith("rows selected."): continue values = line.split() results.append(dict(zip(columns, values))) return results class OraclePlugin(agent_util.Plugin): textkey = "oracle" label = "Oracle" @classmethod def get_metadata(self, config): status = 
agent_util.SUPPORTED msg = None # Make sure they provided an oracle configuration block if not config: self.log.info("No oracle configuration block found") return {} # Check to see if Oracle block has been setup in the configuration file - if not, no op if not ( "oracle_home" in config and "oracle_sid" in config and "username" in config and "password" in config ): msg = "One or more Oracle configuration parameters missing from the agent config file" self.log.info(msg) status = agent_util.MISCONFIGURED # Make sure the sqlplus executable is installed and executable if status == agent_util.SUPPORTED and not os.path.exists( os.path.join(config.get("oracle_home"), "bin", "sqlplus") ): msg = "Oracle sqlplus executable not found in directory specified in agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.MISCONFIGURED: data = { "tnslistener": { "label": "Whether TNS listener is active and rechable", "options": None, "status": status, "error_message": msg, } } return data data = {} sid_list = [s.strip() for s in config.get("oracle_sid").split(",")] # Verify that the tnsping executable is present if os.path.exists(os.path.join(config.get("oracle_home"), "bin", "tnsping")): data["tnslistener"] = { "label": "Whether TNS listener is active and reachable", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } """ To support mapping the query parameter to the sid, introduce the oracle_servicenames_ keyset. The SID name should be lower() for the oracle_servicenames_ keys. This can be either service names or tnsnames, but need verification. 
[oracle] oracle_sids = SID1, SID2 oracle_servicenames_sid1 = pdb1, pdb2 oracle_servicenames_sid2 = pdb3, pdb4 """ # Gather up the available tablespaces that can be reported on spaces = [] for sid in sid_list: tsname_key = "oracle_servicenames_{}".format(sid.lower()) tsnames = config.get(tsname_key, None) if tsnames: tnsnames = [ts.strip() for ts in tsnames.split(",")] for tnsname in tnsnames: results = execute_query(sid, config, tablespace_query, tnsname) key = "{}:{}".format(sid, tnsname) for r in results: spaces.append("{}:{}".format(key, r["tablespace_name"])) elif config.get("tnsname"): results = execute_query( sid, config, tablespace_query, config.get("tnsname") ) for r in results: spaces.append("{}:{}".format(sid, r["tablespace_name"])) elif config.get("rds_end_point"): results = execute_query(sid, config, tablespace_query) for r in results: spaces.append("{}:{}".format(sid, r["tablespace_name"])) if spaces: valid_spaces = agent_util.SUPPORTED msg = "" else: valid_spaces = agent_util.MISCONFIGURED msg = "Unable to find spaces with the current oracle agent configuration" data["tablespace.size"] = { "label": "Size of tablespace in MB", "options": spaces, "status": valid_spaces, "error_message": msg, "units": "MB", } data["tablespace.free"] = { "label": "Free space of tablespace in MB", "options": spaces, "status": valid_spaces, "error_message": msg, "units": "MB", } data["tablespace.percent_free"] = { "label": "Percent of tablespace free", "options": spaces, "status": valid_spaces, "error_message": msg, "units": "percent", } # Gather up the resource utilization metrics results = execute_query(sid_list[0], config, resource_query) if results: data["resource.process.current"] = { "label": "Number of current processes", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.process.max"] = { "label": "Maximum number of processes", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } 
data["resource.session.current"] = { "label": "Number of current sessions", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.session.max"] = { "label": "Maximum number of sessions", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.enqueue_lock.current"] = { "label": "Number of current enqueue locks", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.enqueue_lock.max"] = { "label": "Maximum number of enqueue locks", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.enqueue_resource.current"] = { "label": "Number of current enqueue resources", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.enqueue_resource.max"] = { "label": "Maximum number of enqueue resources", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_lock.current"] = { "label": "Number of current Global Enqueue Service locks", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_lock.max"] = { "label": "Maximum number of Global Enqueue Service locks", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_proc.current"] = { "label": "Number of current Global Enqueue Service processes ", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_proc.max"] = { "label": "Maximum number of Global Enqueue Service processes", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_resource.current"] = { "label": "Number of Global Enqueue Service resources", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.ges_resource.max"] = { "label": "Maximum number of Global Enqueue Service resources", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } 
data["resource.max_shared_servers.current"] = { "label": "Current number of Maximum Shared Servers", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.max_shared_servers.max"] = { "label": "Maximum number of Maximum Shared Servers", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.transactions.current"] = { "label": "Current number of transactions", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } data["resource.transactions.max"] = { "label": "Maximum number of transactions", "options": sid_list, "status": agent_util.SUPPORTED, "error_message": "", } return data def check(self, textkey, data, config): if not data: msg = "Resource option is missing for metric type `%s`." % textkey self.log.info(msg) if textkey.startswith("resource."): sid = data junk, metric, kind = textkey.split(".") metric_mapping = { "process": "processes", "session": "sessions", "enqueue_lock": "enqueue_locks", "enqueue_resource": "enqueue_resources", "ges_lock": "ges_locks", "ges_proc": "ges_procs", "ges_resource": "ges_ress", "max_shared_servers": "max_shared_servers", "transactions": "transactions", } results = execute_query(sid, config, resource_query) for r in results: if r["resource_name"] == metric_mapping.get(metric, None): if kind == "current": return int(r["current_utilization"]) elif kind == "max": return int(r["max_utilization"]) elif textkey.startswith("tablespace"): sid = None tablespace = None tsname = None items = data.split(":") if 2 == len(items): sid = items[0] tablespace = items[1] tsname = config.get("tsname", None) elif 3 == len(items): sid = items[0] tsname = items[1] tablespace = items[2] if sid is None or tablespace is None: raise Exception("Unrecognized option {}".format(data)) results = execute_query(sid, config, tablespace_query, tsname) for r in results: if r["tablespace_name"] == tablespace: if textkey.endswith(".size"): return float(r["sizemb"]) elif 
textkey.endswith(".free"): return float(r["freemb"]) elif textkey.endswith(".percent_free"): if float(r["sizemb"]) == 0: return 0.0 else: return float(r["freemb"]) * 100.0 / float(r["sizemb"]) # If we got here, the target tablespace wasn't found, return None so # graphs don't render return elif textkey == "tnslistener": sid = data ip_address = config.get("tns_listener_ip", "127.0.0.1") cmd = "ORACLE_HOME=%s ORACLE_SID=%s " % (config["oracle_home"], sid) cmd += "%s %s" % ( os.path.join(config.get("oracle_home"), "bin", "tnsping"), ip_address, ) status, output = agent_util.execute_command(cmd) if "OK" in output: return 1 else: return 0 else: msg = "Unknown metric type `%s` retuning `%s` as value." % (textkey, 0) self.log.info(msg) # Unknown metric type, return None by default return package_upgrade.py000064400000021602151700142040010213 0ustar00# from dateutil.parser import parse # from dateutil.relativedelta import relativedelta from datetime import datetime, date import time import gzip import os import agent_util import re DEBIAN = 1 REDHAT = 2 IS_DEBIAN = agent_util.which("/usr/bin/apt-get") is not None IS_REDHAT = agent_util.which("/usr/bin/yum") is not None class PackageUpgradePlugin(agent_util.Plugin): textkey = "package_upgrade" label = "Package Upgrades" @classmethod def get_metadata(self, config): package_count_status = agent_util.UNSUPPORTED package_date_status = agent_util.UNSUPPORTED package_count_msg = None package_date_msg = None if IS_DEBIAN or IS_REDHAT: package_date_status = agent_util.SUPPORTED if IS_DEBIAN: try: import apt_check if ( agent_util.execute_command("sudo /usr/bin/apt-get --version")[0] == 0 ): package_count_status = agent_util.SUPPORTED except: package_count_msg = "Insufficient permission - enable sudo access to apt-get for agent user" else: if PackageUpgradePlugin._can_use_sudo(): package_count_status = agent_util.SUPPORTED else: package_count_msg = "Insufficient permission - enable sudo access to yum for agent user" else: 
package_date_msg = "Unsupported platform" package_count_msg = "Unsupported platform" self.log.info("Unsupported platform") return {} metadata = { "packages.security": { "label": "Security-related packages waiting to be updated", "options": None, "status": package_count_status, "error_message": package_count_msg, "unit": "", "option_string": False, }, "packages.nonsecurity": { "label": "Non-security-related packages waiting to be updated", "options": None, "status": package_count_status, "error_message": package_count_msg, "unit": "", "option_string": False, }, "packages.lastupdated": { "label": "Days since the last package update was run", "options": None, "status": package_date_status, "error_message": package_date_msg, "unit": "", "option_string": False, }, "packages.check_installation": { "label": "Check for Package installation", "options": None, "status": package_date_status, "error_message": package_date_msg, "unit": "", "option_string": True, }, } return metadata @classmethod def _can_use_sudo(self): """ Verify that the user running the agent has enough permissions to run. """ if agent_util.execute_command("sudo /usr/bin/yum --help")[0] == 0: return True else: self.log.error( "Insufficient permission - Enable sudo access for agent user." 
) return False def check(self, textkey, data, config={}): if IS_DEBIAN: if textkey in ["packages.security", "packages.nonsecurity"]: try: import apt_check except: return 0 if agent_util.execute_command("sudo /usr/bin/apt-get update")[0] != 0: return 0 upgrades, security_updates = apt_check.check() if textkey == "packages.security": return security_updates else: return upgrades if textkey == "packages.security": return security_updates else: return upgrades elif textkey == "packages.lastupdated": # Get list of apt history log files, from newest to oldest, search each one til we find an update files = ( agent_util.execute_command("ls -t /var/log/apt/history*")[1] .strip() .split("\n") ) for f in files: if f.endswith(".gz"): lines = agent_util.execute_command("zcat %s" % f)[1] else: lines = agent_util.execute_command("cat %s" % f)[1] matches = re.findall("Upgrade:.+\nEnd-Date:(.*)\n", lines) if matches: try: dt = matches[-1].strip() d, t = dt.split() d = datetime.strptime(d, "%Y-%m-%d") age = (datetime.now() - d).days return max(age, 0) except: self.log.error("Error parsing last upgrade time in %s" % f) # if we got here, we didn't find anything. 
Return None as a marker return None elif textkey == "packages.check_installation": if data: command = "dpkg-query -l %s" % data.strip() if agent_util.execute_command(command)[0] != 0: return 0 else: return 1 elif IS_REDHAT: if textkey in ["packages.security", "packages.nonsecurity"]: if not self._can_use_sudo(): return None retcode, output = agent_util.execute_command( "sudo yum check-update --security" ) if "\n\n" not in output: num_sec_packages = 0 else: num_sec_packages = len(output.split("\n\n")[-1].split("\n")) if textkey == "packages.security": return num_sec_packages else: retcode, output = agent_util.execute_command( "sudo yum check-update" ) if "\n\n" not in output: num_packages = 0 else: num_packages = len(output.split("\n\n")[-1].split("\n")) return max(0, num_packages - num_sec_packages) elif textkey == "packages.lastupdated": # Get list of apt history log files, from newest to oldest, # search each one til we find an update if not self._can_use_sudo(): # Return 0 cause we can't determine the update time return None files = ( agent_util.execute_command("ls -t /var/log/yum.log*")[1] .strip() .split("\n") ) for f in files: lines = ( agent_util.execute_command( "sudo zgrep -A 1 Updated: %s | tail -n 1" % f )[1] .strip() .split("\n") ) if len(lines) >= 1: d = lines[-1][:6] if not d: # Logs came up empty due to rotation. continue try: d = datetime.strptime(d, "%b %d").replace( year=date.today().year ) except AttributeError: # strptime doesn't appeared until Py2.5, using fallback time. d = datetime(*(time.strptime(d, "%b %d")[0:6])).replace( year=date.today().year ) if d > datetime.now(): d = d.replace(year=date.today().year - 1) age = (datetime.now() - d).days return max(age, 0) elif textkey == "packages.check_installation": if data: command = "rpm -qi %s" % data.strip() if agent_util.execute_command(command)[0] != 0: return 0 else: return 1 # if we got here, we didn't find anything. 
Return None as a marker return None # If we get here, we aren't running on a system where we can actually determine what's available. # Default to None to signify error. return None phpfpm.py000064400000013155151700142040006407 0ustar00import agent_util import logging from agent_util import float try: # Python 2.x import httplib except: import http.client as httplib try: # Python 2.x import urlparse except: import urllib.parse as urlpars logger = logging.getLogger(__name__) def execute_query(query): ret, output = agent_util.execute_command(query) return str(output) class PHPFPMPlugin(agent_util.Plugin): textkey = "phpfpm" label = "PHP-FPM" @classmethod def get_metadata(self, config): # Installation and config checks installed = agent_util.which("php5-fpm") or agent_util.which("php-fpm") configured = "console_url" in config # if config is present, trust it and proceed if configured: self.log.info("console_url found in config. Marking plugin as supported") status = agent_util.SUPPORTED msg = None # PHP-FPM is installed, but not configured, ask the user for more config assistance elif installed and not configured: self.log.info("PHP FPM binary found, but console_url is not in config") status = agent_util.MISCONFIGURED msg = "console_url is not in config" return {} # PHP-FPM does not appear to be installed and no config provided, disqualify the plugin else: self.log.info("No console_url provided and php-fpm binary not found") status = agent_util.UNSUPPORTED msg = "php-fpm binary not found" return {} # Generate options based on the number of entries in the config file. options = config.get("console_url").split(",") self.log.info("PHP-FPM is supported. Generating Metadata.") # This is the metadata for the plugin. 
data = { "active processes": { "label": "Active processes", "options": options, "status": status, "error_message": msg, }, "idle processes": { "label": "Idle processes", "options": options, "status": status, "error_message": msg, }, "listen queue": { "label": "Listen queue", "options": options, "status": status, "error_message": msg, }, "listen queue len": { "label": "Listen queue len", "options": options, "status": status, "error_message": msg, }, "max active processes": { "label": "Max active processes", "options": options, "status": status, "error_message": msg, }, "max children reached": { "label": "Children reached", "options": options, "status": status, "error_message": msg, }, "max listen queue": { "label": "Max listen queue", "options": options, "status": status, "error_message": msg, }, "slow requests": { "label": "Slow Requests", "options": options, "status": status, "error_message": msg, }, "start since": { "label": "Start since", "options": options, "status": status, "error_message": msg, }, "start time": { "label": "Start time", "options": options, "status": status, "error_message": msg, }, "total processes": { "label": "Total Processes", "options": options, "status": status, "error_message": msg, }, } return data def check(self, textkey, data, config): """ Make a GET request to the console url and parse the output. 
""" if data.startswith("http"): url = urlparse.urlparse(data + "?json&full") if data.startswith("https:"): connection = httplib.HTTPSConnection(host=url.netloc, timeout=25) else: connection = httplib.HTTPConnection(host=url.netloc, timeout=25) connection.request("GET", "%s?%s" % (url.path, url.query)) resp = connection.getresponse() if int(resp.status) != 200: logging.error( "Invalid response from %s/%s Reason: %s" % (url.netloc, url.path, resp.reason) ) return else: output = resp.read().decode("utf-8") connection.close() else: query = ( r"SCRIPT_NAME=/status SCRIPT_FILENAME=/status QUERY_STRING=json\&full REQUEST_METHOD=GET cgi-fcgi -bind -connect " + data + " |tail -1" ) ret, output = agent_util.execute_command(query) try: statLines = agent_util.json_loads(output) except Exception: logging.exception("Unable to parse json output.") return metric = str(textkey).replace("_", " ") if statLines.has_key(metric): return float(statLines[metric]) else: raise Exception( "stats output did not contain metric " + metric + ". 
stats output: " + statLines ) ping.py000064400000005153151700142040006051 0ustar00import agent_util import sys import os import time import string import re class PingPlugin(agent_util.Plugin): textkey = "agent_ping" label = "Agent Ping" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None data = { "ping_status": { "label": "Ping status", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "boolean", }, "ping_packet_loss": { "label": "Ping packet loss", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "percent", }, "ping_latency": { "label": "Ping latency", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "ms", }, } return data def check(self, textkey, option, config): ping_binary_path = agent_util.which("ping") ping_timeout = 5.0 if "darwin" in sys.platform.lower(): ping_timeout = 5000.0 if textkey == "ping_status": ret, output = agent_util.execute_command( "%s -c 1 -W %s %s" % (ping_binary_path, ping_timeout, option) ) if not ret: return 1 else: return None elif textkey == "ping_packet_loss": ret, output = agent_util.execute_command( "%s -c 10 -i 0.2 -W %s %s 2> /dev/null" % (ping_binary_path, ping_timeout * 2, option) ) for line in output.split("/n"): if "packet loss" in line: pl = float( re.search(r"([-+]?\d*\.\d+|\d+)% packet loss", line).group(1) ) return pl # if no match, default to 100% packet loss return 100.0 elif textkey == "ping_latency": ret, output = agent_util.execute_command( "%s -c 1 -W %s %s" % (ping_binary_path, ping_timeout, option) ) for line in output.split("/n"): if "time" in line: rt = float(re.search(r"time=([-+]?\d*\.\d+|\d+) ms", line).group(1)) return rt # if no match, default to 0 ms return None else: return None postfix.py000064400000003771151700142040006614 0ustar00import agent_util class PostfixPlugin(agent_util.Plugin): textkey = "postfix" label = "Postfix" @classmethod def 
get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if mailq is even installed installed = agent_util.which("mailq") if not installed: self.log.info("mailq binaries not found") status = agent_util.UNSUPPORTED msg = "mailq not found" return {} if status is agent_util.SUPPORTED: try: ret, output = agent_util.execute_command("mailq") if ret != 0: raise Exception except: self.log.error("couldn't get postfix status") status = agent_util.MISCONFIGURED msg = "Couldn't get postfix status, make sure mail system is running " return {} data = { "postfix.queue_size": { "label": "Postfix queue size", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "postfix.requests": { "label": "Number of requests", "options": None, "status": status, "error_message": msg, "unit": "requsts", }, } return data def check(self, textkey, data, config): try: res = 0 ret, output = agent_util.execute_command("mailq | tail -n 1") self.log.debug("mailq | tail -n 1: %s" % str(output)) if ret == 0: output = output.strip().replace("--", "").replace(".", "") if textkey == "postfix.queue_size": res = output.split("Kbytes")[0] else: res = output.split("in")[-1].split("Request")[0] res = int(res) except: res = 0 return res postgresql.py000064400000052577151700142040007333 0ustar00import agent_util import csv import sys try: import pg8000 except: # Fallback for older Python versions that aren't supported pg8000 = None import logging from agent_util import float def execute_query(config, query): unix_sock = config.get("unix_sock", None) hostname = config.get("hostname", "localhost") port = int(config.get("port", "5432")) user = config.get("username", "postgres") password = config.get("password", "postgres") database = config.get("database", "postgres") # make the connection based on either a unix socket or tcp connection if unix_sock: try: pgconn = pg8000.connect( user=user, host=None, unix_sock=unix_sock, port=port, password=password, database=database, ) except 
Exception: logging.exception("Error connecting using provided socket %s" % unix_sock) return None else: try: pgconn = pg8000.connect( user=user, host=hostname, port=port, password=password, database=database, ) except Exception: logging.exception("Error connecting using provided PostgreSQL credentials") return None cursor = pgconn.cursor() q = cursor.execute(query) output = cursor.fetchall() cursor.close() pgconn.close() return output class PostgreSQLPlugin(agent_util.Plugin): """ If this plugin does not work you'll need to confirm at least two steps 1. The user has at least SELECT privileges on the database of your choice 2. Your postgres config has password authentication turned ON. This can be done by putting these settings at the top of your pg_hba.conf file: local all all trust host all 127.0.0.1/32 trust This will allow password authentication to PostgreSQL but still lock it down to only localhost to use password auth """ textkey = "postgresql" label = "PostgreSQL" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if pg8000 is None: self.log.info("Postgres plugin not supported with current Python version") return {} # check if postgresql is even installed installed = agent_util.which("psql") if not installed: self.log.info("psql binary not found") status = agent_util.UNSUPPORTED msg = "psql binary not found" return {} if installed and ( not config.get("database", "postgres") or not config.get("database", "postgres") or not config.get("database", "postgres") ): msg = "Missing config information, please include username, password and database" status = agent_util.MISCONFIGURED if status is agent_util.SUPPORTED: try: output = execute_query( config, "select datname from pg_database where datistemplate = false", ) if output is None: status = agent_util.MISCONFIGURED msg = "Double check the PostgreSQL credentials provided in the agent config file" except: self.log.exception("error running postgresql query") status = 
agent_util.MISCONFIGURED msg = "Double check the PostgreSQL credentials provided in the agent config file" db_list = [] if status is agent_util.SUPPORTED: res = execute_query(config, "SELECT datname FROM pg_database") for db in res: db_list.append(db[0]) data = { "rows_select_idx": { "label": "Rows returned", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "rows_select_scan": { "label": "Rows scanned", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "rows_insert": { "label": "INSERTS", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "rows_update": { "label": "UPDATES", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "rows_delete": { "label": "DELETES", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "rows_total": { "label": "Total queries", "options": db_list, "status": status, "error_message": msg, "unit": "rows/min", }, "numbackends": { "label": "Number of active backends", "options": db_list, "status": status, "error_message": msg, "unit": "backends", }, "xact_commit": { "label": "Transactions committed", "options": db_list, "status": status, "error_message": msg, "unit": "txn/min", }, "xact_rollback": { "label": "Transactions rolled back", "options": db_list, "status": status, "error_message": msg, "unit": "txn/min", }, "xact_total": { "label": "Total transactions", "options": db_list, "status": status, "error_message": msg, "unit": "txn/min", }, "blks_read": { "label": "Blocks read from disk", "options": db_list, "status": status, "error_message": msg, "unit": "blocks/min", }, "blks_hit": { "label": "Blocks read from buffer cache", "options": db_list, "status": status, "error_message": msg, "unit": "blocks/min", }, "blks_cache_pc": { "label": "Buffer cache hit rate (%)", "options": db_list, "status": status, "error_message": msg, "unit": "percent", }, "total_checkpoints": { "label": 
"Total database checkpoints", "options": None, "status": status, "error_message": msg, "unit": "checkpoints", }, "minutes_between_checkpoints": { "label": "Minutes between database checkpoints", "options": None, "status": status, "error_message": msg, "unit": "minutes", }, "total_connections": { "label": "Total connections", "options": db_list, "status": status, "error_message": msg, "unit": "connections", }, "active_connections": { "label": "Active connections", "options": db_list, "status": status, "error_message": msg, "unit": "connections", }, "idle_connections": { "label": "Idle connections", "options": db_list, "status": status, "error_message": msg, "unit": "connections", }, "idle_in_txn_connections": { "label": "Idle in transaction connections", "options": db_list, "status": status, "error_message": msg, "unit": "connections", }, "connections_waiting_for_lock": { "label": "Connections waiting for lock", "options": db_list, "status": status, "error_message": msg, "unit": "connections", }, "max_txn_age": { "label": "Max transaction age", "options": db_list, "status": status, "error_message": msg, "unit": "seconds", }, "temp_files_created": { "label": "Temp files generated", "options": db_list, "status": status, "error_message": msg, "unit": "files", }, "temp_bytes_created": { "label": "Temp bytes generated", "options": db_list, "status": status, "error_message": msg, "unit": "bytes", }, "checkpoints_timed": { "label": "Timed Checkpoints", "options": None, "status": status, "error_message": msg, "unit": "checkpoints", }, "checkpoints_req": { "label": "Requested Checkpoints", "options": None, "status": status, "error_message": msg, "unit": "checkpoints", }, } return data def check(self, textkey, data, config): if data: db_name = data else: db_name = config.get("database", "postgres") try: ####################################### # these are point-in-time metric values ####################################### if textkey == "minutes_between_checkpoints": res = 
execute_query( config, "SELECT seconds_since_start / total_checkpoints / 60 AS minutes_between_checkpoints FROM (SELECT EXTRACT(EPOCH FROM (now() - pg_postmaster_start_time())) AS seconds_since_start, (checkpoints_timed+checkpoints_req) AS total_checkpoints FROM pg_stat_bgwriter) AS sub;", ) row = res[0] self.log.debug("Minutes between database checkpoints: %s" % str(row[0])) return int(row[0]) elif textkey == "checkpoints_timed": res = execute_query( config, "SELECT checkpoints_timed from pg_stat_bgwriter;" ) row = res[0] self.log.debug("Timed checkpoints: %s" % str(row[0])) return int(row[0]) elif textkey == "checkpoints_req": res = execute_query( config, "SELECT checkpoints_req from pg_stat_bgwriter;" ) row = res[0] self.log.debug("Timed checkpoints: %s" % str(row[0])) return int(row[0]) elif textkey == "active_connections": res = execute_query( config, "SELECT count(*) FROM pg_stat_activity WHERE state='active' AND datname='%s';" % db_name, ) row = res[0] self.log.debug("Active connections: %s" % str(row[0])) return int(row[0]) elif textkey == "idle_connections": res = execute_query( config, "SELECT count(*) FROM pg_stat_activity WHERE state='idle' AND datname='%s';" % db_name, ) row = res[0] self.log.debug("Idle connections: %s" % str(row[0])) return int(row[0]) elif textkey == "idle_in_txn_connections": res = execute_query( config, "SELECT count(*) FROM pg_stat_activity WHERE state='idle in transaction' AND datname='%s';" % db_name, ) row = res[0] self.log.debug("Idle in transaction connections: %s" % str(row[0])) return int(row[0]) elif textkey == "total_connections": res = execute_query( config, "SELECT count(*) FROM pg_stat_activity WHERE datname='%s';" % db_name, ) row = res[0] self.log.debug("Total connections: %s" % str(row[0])) return int(row[0]) elif textkey == "connections_waiting_for_lock": res = execute_query( config, "SELECT count(*) FROM pg_locks pgl, pg_stat_activity pgsa WHERE pgl.pid = pgsa.pid and pgl.granted = false and pgsa.datname='%s';" % 
db_name, ) row = res[0] self.log.debug("Connections waiting for lock: %s" % str(row[0])) return int(row[0]) elif textkey == "max_txn_age": res = execute_query( config, "SELECT max(now() - xact_start) FROM pg_stat_activity WHERE state IN ('idle in transaction', 'active') AND datname='%s';" % db_name, ) row = res[0] self.log.debug("Max transaction age: %s" % str(row[0])) return int(row[0].seconds) elif textkey == "numbackends": res = execute_query( config, "SELECT numbackends FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Number of active backends: %s" % str(row[0])) return int(row[0]) ################################################################ # these are guages which require the different to be calculated ################################################################ cached_result = self.get_cache_results(textkey, db_name) if textkey == "rows_select_idx": res = execute_query( config, "SELECT tup_fetched FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("SELECTs (from scans) (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "rows_select_scan": res = execute_query( config, "SELECT tup_returned FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("SELECTs (from scans) (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "rows_insert": res = execute_query( config, "SELECT tup_inserted FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("INSERTs (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "rows_update": res = execute_query( config, "SELECT tup_updated FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("UPDATEs (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "rows_delete": res = execute_query( config, "SELECT tup_deleted FROM pg_stat_database where datname='%s';" % db_name, ) row = 
res[0] self.log.debug("DELETEs (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "rows_total": res = execute_query( config, "SELECT sum(tup_fetched + tup_returned + tup_inserted + tup_updated + tup_deleted) from pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Total queries (rows/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "xact_commit": res = execute_query( config, "SELECT xact_commit FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Transactions committed (txn/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "xact_rollback": res = execute_query( config, "SELECT xact_rollback FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug( "Transactions rolled back (txn/second): %s" % str(row[0]) ) queried_value = int(row[0]) elif textkey == "xact_total": res = execute_query( config, "SELECT sum(xact_commit + xact_rollback) FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Total transactions (txn/second): %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "blks_read": res = execute_query( config, "SELECT blks_read FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug( "Blocks read from disk (blocks/second): %s" % str(row[0]) ) queried_value = int(row[0]) elif textkey == "blks_hit": res = execute_query( config, "SELECT blks_hit FROM pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug( "Blocks read from buffer cache (blocks/second): %s" % str(row[0]) ) queried_value = int(row[0]) elif textkey == "blks_cache_pc": res = execute_query( config, "SELECT blks_read, blks_hit FROM pg_stat_database where datname='%s';" % db_name, ) row = list(map(float, res[0])) if row[0] or row[1]: result = int(row[1] / (row[0] + row[1]) * 100) else: result = 0 self.log.debug("Buffer cache hit rate (%%): %s" % str(result)) 
queried_value = int(result) elif textkey == "total_checkpoints": res = execute_query( config, "SELECT total_checkpoints FROM (SELECT EXTRACT(EPOCH FROM (now() - pg_postmaster_start_time())) AS seconds_since_start, (checkpoints_timed+checkpoints_req) AS total_checkpoints FROM pg_stat_bgwriter) AS sub;", ) row = res[0] self.log.debug("Total database checkpoints: %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "temp_files_created": res = execute_query( config, "select temp_files from pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Temp files generated: %s" % str(row[0])) queried_value = int(row[0]) elif textkey == "temp_bytes_created": res = execute_query( config, "select temp_bytes from pg_stat_database where datname='%s';" % db_name, ) row = res[0] self.log.debug("Temp bytes generated: %s" % str(row[0])) queried_value = int(row[0]) except: self.log.error( "Unable to gather metric - double check the PostgreSQL credentials provided in the agent config file" ) return None # we have a cached result, so do the math and return the differential if cached_result: cached_value = cached_result[0][1] self.cache_result(textkey, db_name, queried_value, replace=True) return int(queried_value - cached_value) # no cached value in the agent's cache. 
This could be the first run of the metric, so return 0 and let the next run report the correct value else: self.cache_result(textkey, db_name, queried_value, replace=True) return 0 process.py000064400000042352151700142040006574 0ustar00import os import sys import agent_util try: import psutil except: psutil = None import re class ProcessPlugin(agent_util.Plugin): textkey = "process" label = "Process" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if "aix" in sys.platform: status = agent_util.SUPPORTED msg = None elif "darwin" in sys.platform: status = agent_util.SUPPORTED msg = None elif "vmware" in sys.platform: status = agent_util.SUPPORTED msg = None elif psutil is None: # Unable to import psutil self.log.info( "Unable to import psutil library, no process metrics available" ) status = agent_util.UNSUPPORTED msg = "Unable to import psutil library, please install and rebuild metadata" elif not os.path.exists("/proc"): self.log.info("/proc not found") status = agent_util.UNSUPPORTED msg = "Enable procfs." 
return {} if "vmware" in sys.platform: metadata = { "process.named_count": { "label": "Number of processes - name", "options": None, "status": status, "error_message": msg, "unit": "processes", "option_string": True, }, "process.named_count.full": { "label": "Number of processes - full command line", "options": None, "status": status, "error_message": msg, "unit": "processes", "option_string": True, }, "process.exists": { "label": "Process is running", "options": None, "status": status, "error_message": msg, "unit": "boolean", "option_string": True, }, "process.exists.full": { "label": "Process is running - full command line", "options": None, "status": status, "error_message": msg, "unit": "boolean", "option_string": True, }, } else: metadata = { "process.running_count": { "label": "Number of processes running", "options": None, "status": status, "error_message": msg, "unit": "processes", }, "process.named_count": { "label": "Number of processes - name", "options": None, "status": status, "error_message": msg, "unit": "processes", "option_string": True, }, "process.named_memory_percentage": { "label": "Memory percentage of processes - name", "options": None, "status": status, "error_message": msg, "unit": "percent", "option_string": True, }, "process.named_cpu_percentage": { "label": "CPU percentage of processes - name", "options": None, "status": status, "error_message": msg, "unit": "percent", "option_string": True, }, "process.exists": { "label": "Process is running", "options": None, "status": status, "error_message": msg, "unit": "boolean", "option_string": True, }, "process.thread_count": { "label": "Process Thread Count - executable name only", "options": None, "status": status, "error_message": msg, "unit": "threads", "option_string": True, }, "process.named_count.full": { "label": "Number of processes - full command line", "options": None, "status": status, "error_message": msg, "unit": "processes", "option_string": True, }, 
"process.named_memory_percentage.full": { "label": "Memory percentage of processes - full command line", "options": None, "status": status, "error_message": msg, "unit": "percent", "option_string": True, }, "process.named_memory_raw_mb.full": { "label": "MB of memory used by processes - full command line", "options": None, "status": status, "error_message": msg, "unit": "MB", "option_string": True, }, "process.named_cpu_percentage.full": { "label": "CPU percentage of processes - full command line", "options": None, "status": status, "error_message": msg, "unit": "percent", "option_string": True, }, "process.exists.full": { "label": "Process is running - full command line", "options": None, "status": status, "error_message": msg, "unit": "boolean", "option_string": True, }, "process.thread_count.full": { "label": "Process Thread Count - executable name and args", "options": None, "status": status, "error_message": msg, "unit": "threads", "option_string": True, }, } return metadata def check(self, textkey, data, config): if "aix" in sys.platform or "sunos" in sys.platform: # ps_cmd = "ps -eo %p' '%a" if "aix" in sys.platform: ps_cmd = "ps axww" elif "sunos" in sys.platform: ps_cmd = "ps -eo pid' 'args" retcode, output = agent_util.execute_command(ps_cmd) output = output.split("\n") if textkey.endswith(".full"): textkey = textkey.rstrip(".full") if textkey == "process.running_count": return len(output) - 1 else: count = 0 pids = [] for line in output[1:]: if data in line: count += 1 pids.append(line.split()[0]) if textkey == "process.named_count": return count elif textkey == "process.exists": if count: return 1 else: return 0 elif textkey in [ "process.named_memory_percentage", "process.named_cpu_percentage", "named_memory_raw_mb", ]: all_cpu = 0 all_mem = 0 all_raw_kb = 0 for pid in pids: if "aix" in sys.platform: ps = "ps -fp %s -o pcpu,pmem,rss" % pid elif "sunos" in sys.platform: ps = "ps -fp %s -o pcpu,pmem,rss" % pid ret, output = agent_util.execute_command(ps) 
output = output.strip().split("\n") if len(output) < 2: continue fields = output[1].split() cpu = float(fields[0]) mem = float(fields[1]) raw_kb = float(fields[2]) all_cpu += cpu all_mem += mem all_raw_kb += raw_kb if textkey == "process.named_memory_percentage": return all_mem if textkey == "process.named_memory_raw_mb": return float(all_raw_kb) / 1024 else: return all_cpu # Unknown AIX/Solaris textkey return None # vmware logic elif "vmware" in sys.platform: pgrep = agent_util.which("pgrep") cmd = "" if not pgrep: self.log.error("Unable to find 'pgrep'! Unable to check for processes") return None if textkey.endswith(".full"): cmd = "%s -f" % (pgrep) else: cmd = "%s" % (pgrep) cmd += " %s" % data ret, output = agent_util.execute_command(cmd) out = output.split("\n") if textkey.startswith("process.named_count"): return len(out) - 1 if textkey.startswith("process.exists"): if len(out) - 1 > 0: return 1 else: return 0 if psutil is None: # Unable to import psutil, log and move on self.log.info( "Unable to import psutil library, no process metrics available" ) return None # Default Linux/FreeBSD logic search = "name" if textkey.endswith(".full"): textkey = textkey.rstrip(".full") search = "cmdline_str" # updated to use psutil library to get all processes and pull the name, pid and cmdline # this gets all the processes as objects process_objs = psutil.process_iter() processes = [] data = str(data) if textkey.startswith("process.named_cpu_percentage") and search == "name": for proc in process_objs: try: if re.search(data, proc.name()): processes.append( proc.as_dict(attrs=["pid", "cmdline", "name", "cpu_times"]) ) except psutil.NoSuchProcess: self.log.exception("Unable to get process.") continue elif ( textkey.startswith("process.named_cpu_percentage") and search == "cmdline_str" ): for proc in process_objs: try: if re.search(data, " ".join(proc.cmdline())): processes.append( proc.as_dict(attrs=["pid", "cmdline", "name", "cpu_times"]) ) except psutil.NoSuchProcess: 
self.log.exception("Unable to get process.") continue else: # this iterates through and shaves off infomation that isn't needed so we can filter on it later for proc in process_objs: try: processes.append( proc.as_dict( attrs=[ "pid", "cmdline", "name", "cpu_percent", "memory_percent", "memory_info", "num_threads", ] ) ) except psutil.NoSuchProcess: self.log.exception("Unable to get process.") continue # setting up a process list for us to transform the split cmdline entires into cmdline_str for proc in processes: if not proc["cmdline"]: proc["cmdline_str"] = "" continue proc["cmdline_str"] = " ".join(proc["cmdline"]) self.log.debug("All running processes:\n%s\n" % processes) if textkey == "process.running_count" or textkey == "count": return len(psutil.pids()) if textkey == "process.named_count": found_procs = [] self.log.info("Searching processes for '%s'" % data) for proc in processes: if proc[search] is not None and re.search(data, proc[search]): found_procs.append(proc) self.log.debug(found_procs) return len(found_procs) elif textkey == "process.exists": found_procs = [] self.log.info("Searching processes for '%s'" % data) for proc in processes: if proc[search] is not None and re.search(data, proc[search]): found_procs.append(proc) self.log.debug(found_procs) if found_procs: return 1 else: return 0 elif textkey in [ "process.named_memory_percentage", "process.named_memory_raw_mb", "process.thread_count", ]: found_procs = [] self.log.info("Searching processes for '%s'" % data) for proc in processes: if proc[search] is not None and re.search(data, proc[search]): found_procs.append(proc) self.log.debug(found_procs) self.log.debug( "Found matching processes: %s" % [p[search] for p in found_procs] ) # return 0 if no procs found if not found_procs: return 0 all_cpu = 0 all_mem = 0 all_raw_mem = 0 all_thread_count = 0 if "darwin" == sys.platform: rv = self.findDarwinProcInfo(found_procs) all_cpu = rv["cpu_percent"] all_mem = rv["memory_percent"] all_raw_mem = 
rv["memory_info"] all_thread_count = rv["num_threads"] else: for pid in found_procs: cpu = pid["cpu_percent"] mem = pid["memory_percent"] mem_raw = pid["memory_info"].rss thread_count = pid["num_threads"] all_cpu += cpu all_mem += mem all_raw_mem += mem_raw all_thread_count += thread_count all_raw_mem = float(all_raw_mem) / (1024 * 1024) if textkey == "process.named_memory_percentage": return all_mem if textkey == "process.named_memory_raw_mb": return all_raw_mem if textkey == "process.thread_count": return all_thread_count else: return all_cpu elif textkey in [ "process.named_cpu_percentage", "process.named_cpu_percentage.full", ]: if processes: user_sum = sum( [ p.get("cpu_times").user + p.get("cpu_times").system for p in processes ] ) else: return 0.0 last_result = self.get_cache_results(textkey, data) if not last_result: self.cache_result(textkey, data, user_sum) return None delta, previous = last_result[0] time_used_result = (user_sum - previous) / delta self.cache_result(textkey, data, user_sum) number_of_cores = psutil.cpu_count() if not number_of_cores: number_of_cores = 1 return (time_used_result / number_of_cores) * 100 return 0 def findDarwinProcInfo(self, found_procs): """ On OSX, psutil will not report process information on processes belonging to other users, unless the requesting process is privileged. 
https://github.com/giampaolo/psutil/issues/883 """ pids = [] for fp in found_procs: pids.append(str(fp["pid"])) rc, output = agent_util.execute_command("ps uM -p {}".format(",".join(pids))) lines = output.split("\n") procLines = lines[1:] rv = { "cpu_percent": float(0), "memory_percent": float(0), "memory_info": float(0), "num_threads": len(procLines), } for l in procLines: info = l.split() if len(info) == 14: # Full info line for process rv["cpu_percent"] += float(info[2]) rv["memory_percent"] += float(info[3]) rv["memory_info"] += float(info[5]) m = rv["memory_info"] rv["memory_info"] = float(m) / 1024 return rv rabbitmq.py000064400000047551151700142040006725 0ustar00import agent_util from base64 import b64encode import string import urllib import logging try: # Python 2.x from httplib import HTTPConnection, HTTPSConnection, HTTPException except: from http.client import HTTPConnection, HTTPSConnection, HTTPException BYTES_TO_KB_KEYS = [ "memory", "nodes.mem_used", "nodes.io_read_bytes", "nodes.mem_used_details.rate", "nodes.io_read_bytes_details.rate", "nodes.io_write_bytes", "nodes.io_write_bytes_details.rate", ] def execute_query(config, command, option=""): if command == "nodes": # For nodes we want to double check we haven't got a local cache version # first. 
cached_result = agent_util.LOCAL_CACHE_RESULTS.get("rabbit@/api/nodes") if cached_result: logging.info( "Retrieved information from the local cache for rabbit@/api/nodes" ) return cached_result elif command == "queues": cached_result = agent_util.LOCAL_CACHE_RESULTS.get("rabbit@/queues") if cached_result: logging.info( "Retrieved information from the local cache for rabbit@/api/queues" ) auth = b64encode("%s:%s" % (config["username"], config["password"])) headers = { "Authorization": "Basic %s" % auth, } output = None conn_host = config.get("management_interface_host", "http://localhost") if conn_host.startswith("https"): conn = HTTPSConnection( string.replace(conn_host, "https://", ""), config["management_interface_port"], ) else: conn = HTTPConnection( string.replace(conn_host, "http://", ""), config["management_interface_port"], ) conn.request("GET", "/api/" + command, headers=headers) r = conn.getresponse() status, output = r.status, r.read() conn.close() if command == "nodes": # Save the latest result in our internal cache agent_util.LOCAL_CACHE_RESULTS["rabbit@/api/nodes"] = output elif command == "queues": agent_util.LOCAL_CACHE_RESULTS["rabbit@/queues"] = output return output class RabbitMQPlugin(agent_util.Plugin): textkey = "rabbitmq" label = "rabbitmq" DEFAULTS = { "management_interface_host": "http://localhost", "management_interface_port": 15672, "username": "guest", "password": "guest", } @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if rabbitmq is even installed installed = agent_util.which("rabbitmqctl") if not installed and not config.get("from_docker"): self.log.info("rabbitmqctl binary not found") status = agent_util.UNSUPPORTED msg = "rabbitmqctl binary not found" return {} INTERFACE_NOT_FOUND_ERROR = """ No [rabbitmq] config block was found in the config file and we were unable to access the HTTP management interface via the expected port (%s). 
Please verify that the RabbitMQ management plugin is installed. """ # check that management plugin is installed and running if config: new_config = self.DEFAULTS.copy() new_config.update(config) config = new_config else: config = self.DEFAULTS # check the specified port try: conn_host = config.get("management_interface_host", "http://localhost") if conn_host.startswith("https"): conn = HTTPSConnection( string.replace(conn_host, "https://", ""), config["management_interface_port"], ) else: conn = HTTPConnection( string.replace(conn_host, "http://", ""), config["management_interface_port"], ) conn.request("GET", "/") r = conn.getresponse() conn.close() except Exception: status = agent_util.MISCONFIGURED msg = INTERFACE_NOT_FOUND_ERROR % config["management_interface_port"] self.log.exception( "Interface not found %s" % config["management_interface_port"] ) self.log.info(msg) if status == agent_util.SUPPORTED and r.status != 200: status = agent_util.MISCONFIGURED msg = INTERFACE_NOT_FOUND_ERROR % config["management_interface_port"] self.log.info(msg) objects, queue_opts = None, None queues_schema = {"resource": "string", "vhost": "string", "queue": "string"} queue_opts = [] node_opts = [] if status == agent_util.SUPPORTED: try: overview = agent_util.json_loads(execute_query(config, "overview")) objects = [obj.title() for obj in overview["object_totals"].keys()] queues = agent_util.json_loads(execute_query(config, "queues")) for entry in queues: resource_name = entry["vhost"] + ":" + entry["name"] queue_opts.append( { "resource": resource_name, "queue": entry["name"], "vhost": entry["vhost"], } ) nodes = agent_util.json_loads(execute_query(config, "nodes")) node_opts = [node["name"] for node in nodes] except: self.log.exception("error querying rabbitmq management API") status = agent_util.MISCONFIGURED msg = "Unable to access RabbitMQ metrics. Please ensure that the management plugin is installed and that your credentials are valid in the agent config file." 
metadata = { "object_totals": { "label": "# of Objects", "options": objects, "status": status, "error_message": msg, }, "queue.messages": { "label": "Queue Length", "options": queue_opts, "options_schema": queues_schema, "status": status, "error_message": msg, }, "queue.memory": { "label": "Queue Memory Usage (kB)", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "kB", "error_message": msg, }, "queue.messages_ready": { "label": "Queue number of messages ready for delivery", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.messages_unacknowledged": { "label": "Queue number of unacknowledged messages", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.message_stats.publish": { "label": "Queue messages published recently", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.message_stats.publish_details.rate": { "label": "Queue message publishing rate", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count/sec", "error_message": msg, }, "queue.message_stats.deliver_get": { "label": "Queue messages delivered recently", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.message_stats.deliver_get_details.rate": { "label": "Queue message delivery rate", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.message_stats.redeliver": { "label": "Queue message redelivered recently", "options": queue_opts, "options_schema": queues_schema, "status": status, "unit": "count", "error_message": msg, }, "queue.message_stats.redeliver_details.rate": { "label": "Queue messages redeliver rate", "options": queue_opts, "options_schema": queues_schema, "status": 
status, "unit": "count/sec", "error_message": msg, }, "overview.queue_totals.messages_ready": { "label": "Cluster number of messages ready for delivery", "options": None, "status": status, "error_message": msg, "unit": "count", }, "overview.queue_totals.messages_unacknowledged": { "label": "Cluster number of unacknowledged messages", "options": None, "status": status, "error_message": msg, "unit": "count", }, "overview.message_stats.publish": { "label": "Cluster messages published recently", "options": None, "status": status, "error_message": msg, "unit": "count", }, "overview.message_stats.publish_details.rate": { "label": "Cluster messages publish rate", "options": None, "status": status, "error_message": msg, "unit": "msgs/sec", }, "overview.message_stats.deliver_get": { "label": "Cluster messages delivered to consumers recently", "options": None, "status": status, "error_message": msg, "unit": "count", }, "overview.message_stats.deliver_details.rate": { "label": "Cluster message delivery rate", "options": None, "status": status, "error_message": msg, "unit": "msgs/sec", }, "overview.message_stats.redeliver": { "label": "Cluster messages redelivered to consumers recently", "options": None, "status": status, "error_message": msg, "unit": "count", }, "overview.message_stats.redeliver_details.rate": { "label": "Cluster message redelivery rate", "options": None, "status": status, "error_message": msg, "unit": "msgs/sec", }, "nodes.mem_used": { "label": "Node total amount of memory used", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs", }, "nodes.mem_used_details.rate": { "label": "Node memory used rate", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs/sec", }, "nodes.io_read_count": { "label": "Node total number of read operations by the persister", "options": node_opts, "status": status, "error_message": msg, "unit": "count", }, "nodes.io_read_count_details.rate": { "label": "Node rate of read count", 
"options": node_opts, "status": status, "error_message": msg, "unit": "count/sec", }, "nodes.io_read_bytes": { "label": "Node total number of bytes read from disk by the persister", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs", }, "nodes.io_read_bytes_details.rate": { "label": "Node rate of read kBs", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs/sec", }, "nodes.io_write_bytes": { "label": "Node total number of bytes written to disk by the persister", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs", }, "nodes.io_write_bytes_details.rate": { "label": "Node rate of written bytes", "options": node_opts, "status": status, "error_message": msg, "unit": "kBs/sec", }, "nodes.fd_used": { "label": "Node file descriptors used", "options": node_opts, "status": status, "error_message": msg, "unit": "count", }, "nodes.fd_used_details.rate": { "label": "Node file descriptors used rate", "options": node_opts, "status": status, "error_message": msg, "unit": "count/sec", }, "nodes.sockets_total": { "label": "Node sockets available", "options": node_opts, "status": status, "error_message": msg, "unit": "count", }, "nodes.sockets_used": { "label": "Node sockets used", "options": node_opts, "status": status, "error_message": msg, "unit": "count", }, "nodes.socktets_used_details.rate": { "label": "Node sockets used rate", "options": node_opts, "status": status, "error_message": msg, "unit": "count/sec", }, "nodes.proc_used": { "label": "Node number of Erlang processes in use", "options": node_opts, "status": status, "error_message": msg, "unit": "count", }, "nodes.proc_used_details.rate": { "label": "Node processor used rate", "options": node_opts, "status": status, "error_message": msg, "unit": "count/sec", }, } return metadata @classmethod def get_metadata_docker(self, container, config): if "management_interface_host" not in config: try: ip = agent_util.get_container_ip(container) 
config["management_interface_host"] = ip except Exception as e: self.log.exception(e) config["from_docker"] = True return self.get_metadata(config) def check(self, textkey, option, config): if config: new_config = self.DEFAULTS.copy() new_config.update(config) config = new_config else: config = self.DEFAULTS if "." in textkey: command, key = textkey.split(".", 1) if command.startswith("queue"): command += "s" try: json_data = execute_query(config, command, option) except Exception: self.log.exception( "Unable to get %s from %s" % (command, config.get("management_interface_host")) ) return None data = agent_util.json_loads(json_data) # If no object (queue, exchange, host, etc.) is specified, report an aggregate try: if isinstance(data, list): if command.startswith("nodes"): # Node calls return all nodes information, # need to filter them by option found_data = None for entry in data: if entry.get("name") == option: # Found our node found_data = entry break if not found_data: self.log.info( "Unable to find information for node %s" % option ) return None else: res = self._parse_dict_tree(key, found_data) elif command.startswith("queues"): # Queue calls are organized by vhost and queue name. # Need to filter them found_data = None vhost, queue_name = option.split(":") for entry in data: if ( entry.get("vhost") == vhost and entry.get("name") == queue_name ): found_data = entry break if not found_data: self.log.info( "Unable to find information for vhost %s queue %s" % (vhost, queue_name) ) return None else: res = self._parse_dict_tree(key, found_data) else: res = sum([obj[key] for obj in data]) else: if "." 
not in key: res = data[key] if not bool(res): res = None else: res = self._parse_dict_tree(key, data) except Exception: self.log.exception("Error gathering data") return None if key == BYTES_TO_KB_KEYS: res /= 1000.0 elif textkey == "object_totals": json_data = execute_query(config, "overview") res = agent_util.json_loads(json_data)["object_totals"][option.lower()] if textkey.endswith("rate") and res < 0: # Rates that go below 0 we will turn to 0. res = 0 return res def check_docker(self, container, textkey, option, config): if "management_interface_host" not in config: try: ip = agent_util.get_container_ip(container) config["management_interface_host"] = ip except Exception as e: self.log.exception(e) config["from_docker"] = True return self.check(textkey, option, config) def _parse_dict_tree(self, key, data): """ Using the key as the nodes, parse the data dictionary to extract the information. E.g metric_used_details.rate looks for data['metric_used_details']['rate'] """ entries = key.split(".") value = data for entry in entries: value = value.get(entry) if not bool(value): value = None break return value redis.py000064400000026502151700142040006223 0ustar00import agent_util import csv import sys from agent_util import float import re if sys.version[0] == "3": from io import StringIO else: from StringIO import StringIO def execute_query(config, query): cmd = agent_util.which("redis-cli") if config.get("hostname"): cmd += " -h %s" % config["hostname"] if config.get("password"): cmd += " -a '%s'" % config["password"] if config.get("port", None) is not None: cmd += " -p {}".format(config["port"]) cmd += " %s" % query status, output = agent_util.execute_command( cmd, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if status != 0: raise Exception(output) output = StringIO(output) parsed_output = list(csv.reader(output, delimiter="\t")) output_dict = {} for item in parsed_output: if len(item): a = next(csv.reader([item[0]], delimiter=":", quotechar="'")) if len(a) == 
2: output_dict[a[0]] = a[1] return output_dict def execute_simple_query(config, query, db=None): "Make a call to Redis CLI that returns a single value" cmd = agent_util.which("redis-cli") if config.get("hostname"): cmd += " -h %s" % config["hostname"] if config.get("password"): cmd += " -a '%s'" % config["password"] if config.get("port", None) is not None: cmd += " -p {}".format(config["port"]) if db: cmd += " -n '%s'" % db cmd += " --csv %s" % query status, output = agent_util.execute_command( cmd, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if status != 0: raise Exception(output) try: value = float(output.strip()) except: value = None return value class RedisPlugin(agent_util.Plugin): textkey = "redis" label = "Redis" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if redis is even installed installed = agent_util.which("redis-cli") and ( config or agent_util.which("redis-server") ) if not installed: status = agent_util.UNSUPPORTED if config.get("from_docker"): msg = "Please install the redis-cli on the docker host" self.log.info(msg) else: self.log.info("redis binary not found") msg = "redis binary not found" return {} if status == agent_util.SUPPORTED: try: output = execute_query(config, "ping") except: self.log.exception("error running redis query") status = agent_util.MISCONFIGURED msg = "Unable to connect to redis server, please check your Redis connection settings in the agent config file." 
# Get the databases with current key entries output = execute_query(config, "info keyspace") options = ["Total"] options += output.keys() data = { # Server "server.uptime_in_seconds": { "label": "Uptime in seconds", "options": None, "status": status, "error_message": msg, }, # Memory "memory.used_memory": { "label": "Used memory", "options": None, "status": status, "error_message": msg, }, "memory.used_memory_rss": { "label": "Used memory rss", "options": None, "status": status, "error_message": msg, }, "memory.used_memory_peak": { "label": "Used memory peak", "options": None, "status": status, "error_message": msg, }, # Clients "clients.connected_clients": { "label": "Connected clients", "options": None, "status": status, "error_message": msg, }, "clients.blocked_clients": { "label": "Blocked clients", "options": None, "status": status, "error_message": msg, }, # Replication "replication.connected_slaves": { "label": "Connected slaves", "options": None, "status": status, "error_message": msg, }, "replication.role": { "label": "Replication: role (master=1, slave=0)", "options": None, "status": status, "error_message": msg, }, # Persistence "persistence.rdb_changes_since_last_save": { "label": "Changes since last save", "options": None, "status": status, "error_message": msg, }, "persistence.rdb_bgsave_in_progress": { "label": "Background save in progress", "options": None, "status": status, "error_message": msg, }, # Stats "stats.total_commands_processed": { "label": "Total commands processed", "options": None, "status": status, "error_message": msg, "unit": "processed/s", }, "stats.expired_keys": { "label": "Expired keys", "options": None, "status": status, "error_message": msg, }, "stats.evicted_keys": { "label": "Evicted keys", "options": None, "status": status, "error_message": msg, "unit": "evictions/s", }, "stats.keyspace_hits": { "label": "Keyspace hits", "options": None, "status": status, "error_message": msg, "unit": "hits/s", }, "stats.keyspace_misses": { 
"label": "Keyspace misses", "options": None, "status": status, "error_message": msg, "unit": "misses/s", }, "stats.pubsub_channels": { "label": "Pub/sub channels", "options": None, "status": status, "error_message": msg, }, "stats.pubsub_patterns": { "label": "Pub/sub patterns", "options": None, "status": status, "error_message": msg, }, "stats.rejected_connections": { "label": "Rejected connections", "options": None, "error_message": msg, "status": status, }, "stats.hit_rate": { "label": "Hit rate", "options": None, "error_message": msg, "status": status, }, "data.llen": { "label": "Length of list", "options": None, "status": status, "option_string": 1, "error_message": msg, }, "data.hlen": { "label": "Count of fields in a hash", "options": None, "status": status, "option_string": 1, "error_message": msg, }, "data.dbsize": { "label": "Total keys", "options": options, "status": status, "error_message": msg, }, "data.dbsize_expiration": { "label": "Total keys with expiration", "options": options, "status": status, "error_message": msg, }, } return data @classmethod def get_metadata_docker(self, container, config): if "hostname" not in config: try: ip = agent_util.get_container_ip(container) config["hostname"] = ip except Exception: self.log.exception("get_metadata_docker error") config["from_docker"] = True return self.get_metadata(config) def check(self, textkey, data, config): result = 0 if textkey in ("data.llen", "data.hlen") and "::" in data: # Split the data to find a database. 
db, data = data.split("::") else: db = None if textkey == "data.llen": return execute_simple_query(config, "llen %s" % data, db=db) elif textkey == "data.hlen": return execute_simple_query(config, "hlen %s" % data, db=db) redis_info = execute_query(config, "INFO") if textkey in ("data.dbsize", "data.dbsize_expiration"): if textkey == "data.dbsize": exp = r"^keys=(\d+).*$" else: exp = r"^.*expires=(\d+).*$" if data == "Total": output = execute_query(config, "info keyspace") keys = output.keys() for key in keys: key_info = redis_info.get(key) if key_info: found = re.match(exp, key_info) result += int(found.groups()[0]) else: key_info = redis_info.get(data) if key_info: found = re.match(exp, key_info) if found: result = found.groups()[0] else: result = 0 elif textkey == "stats.hit_rate": keyspace_hits = int(redis_info["keyspace_hits"]) keyspace_miss = int(redis_info["keyspace_misses"]) if keyspace_hits + keyspace_miss != 0: result = keyspace_hits / (keyspace_hits + keyspace_miss) else: result = redis_info[textkey[textkey.rfind(".") + 1 :]] if textkey == "replication.role": if result == "master": result = 1 else: result = 0 try: result = int(result) except Exception: result = 0 if textkey: self.log.debug("%s: %d" % (textkey, result)) if textkey in ( "stats.evicted_keys", "stats.keyspace_hits", "stats.keyspace_misses", "stats.total_commands_processed", ): cache = self.get_cache_results(textkey, data) self.cache_result(textkey, data, result) if not cache: return None delta, previous = cache[0] if result < previous: return None result = (result - previous) / float(delta) return result def check_docker(self, container, textkey, data, config): if "hostname" not in config: try: ip = agent_util.get_container_ip(container) config["hostname"] = ip except Exception: self.log.exception("check_docker error") config["from_docker"] = True return self.check(textkey, data, config) sendmail.py000064400000004653151700142040006714 0ustar00import agent_util class 
SendmailPlugin(agent_util.Plugin): textkey = "sendmail" label = "Sendmail" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None sendmail_bin = agent_util.which("sendmail") # in case sendmail is not in default path for the agent if not sendmail_bin: sendmail_bin = agent_util.which("/usr/sbin/sendmail") if not sendmail_bin: self.log.info("couldn't find sendmail binary") status = agent_util.UNSUPPORTED msg = "Couldn't find sendmail binary" # if they have configured sudo, try it first if sendmail_bin and config.get("use_sudo", 0): if not agent_util.execute_command("sudo -n %s -bp" % sendmail_bin)[0] == 0: self.log.error( "Insufficient permission - Enable sudo access for agent user on sendmail." ) status = agent_util.UNSUPPORTED msg = "SUDO access not configured for fm-agent user" data = { "queue_depth": { "label": "Sendmail queue depth", "options": None, "status": status, "error_message": msg, } } return data def check(self, textkey, data, config={}): sendmail_bin = agent_util.which("sendmail", exc=True) # in case sendmail is not in default path for the agent if not sendmail_bin: sendmail_bin = agent_util.which("/usr/sbin/sendmail", exc=True) if config.get("use_sudo", 0): sudo_string = "sudo -n " else: sudo_string = "" retcode, output = agent_util.execute_command( "%s%s -bp" % (sudo_string, sendmail_bin) ) self.log.debug("sendmail -bp output: %s" % str(output)) # sample output # dev@wlocalhost:/# sendmail -bpc # /var/spool/mqueue is empty # Total requests: 0 output = output.splitlines() for line in output: if line.strip().startswith("Total requests:"): try: return int(line.strip().split("Total requests:")[1].strip()) except: self.log.debug("Failed parsing line: '%s'" % line) return None self.log.debug("No lines matched Total requests string") return None sysctl.py000064400000002663151700142040006440 0ustar00import agent_util import csv import sys def get_sysctl_dict(): sysctl = agent_util.which("sysctl") status, output = 
agent_util.execute_command("%s -a" % sysctl) if status != 0: raise Exception(output) metadata = {} for item in output.splitlines(): m = item.split(" = ") try: metadata[m[0]] = int(m[-1].strip()) except: pass return metadata class SysctlPlugin(agent_util.Plugin): textkey = "sysctl_v2" label = "Sysctl" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if sysctl is even installed installed = agent_util.which("sysctl") if not installed: self.log.info("sysctl binary not found") status = agent_util.UNSUPPORTED msg = "sysctl binary not found" metadata = {} if status is agent_util.SUPPORTED: metadata = get_sysctl_dict() data = { "sysctl": { "label": "Sysctl metric", "options": None, "status": status, "error_message": msg, "option_string": True, } } return data def check(self, textkey, data, config): metadata = get_sysctl_dict() res = None try: res = metadata[data] except: pass return res tcp.py000064400000015132151700142040005700 0ustar00import agent_util import sys import os import socket import time from datetime import datetime, timedelta DEFAULT_TIMEOUT = 15 class TCPPlugin(agent_util.Plugin): textkey = "tcp" label = "TCP/IP port" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None data = { "tcp_status": { "label": "TCP port status", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "boolean", }, "udp_status": { "label": "UDP port status", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "boolean", }, "tcp_latency": { "label": "TCP port latency", "options": None, "option_string": True, "status": status, "error_message": msg, "unit": "secs", }, } # for Linux, add the network connection state metrics if "linux" in sys.platform and agent_util.which("netstat"): CONNECTION_STATUS = [ "CLOSE_WAIT", "CLOSED", "ESTABLISHED", "FIN_WAIT_1", "FIN_WAIT_2", "LAST_ACK", "LISTEN", "SYN_RECV", "SYN_RECEIVED", "SYN_SEND", 
"TIME_WAIT", ] data["network.connections.tcp"] = { "label": "TCP Connections", "options": CONNECTION_STATUS, "status": status, "error_message": msg, "unit": "connections", } data["network.connections.tcpv6"] = { "label": "TCPv6 Connections", "options": CONNECTION_STATUS, "status": status, "error_message": msg, "unit": "connections", } data["network.connections.udp"] = { "label": "UDP Connections", "options": CONNECTION_STATUS, "status": status, "error_message": msg, "unit": "connections", } data["network.connections.udpv6"] = { "label": "UDPv6 Connections", "options": CONNECTION_STATUS, "status": status, "error_message": msg, "unit": "connections", } return data def check(self, textkey, option, config): # special logic for getting TCP/UDP connection counts if textkey.startswith("network.connections"): ret, output = agent_util.execute_command("netstat -tunap") lines = output.splitlines()[1:] header = True connection_stats = {} for line in lines: if line.startswith("Proto"): header = False continue elif header: continue line = line.split() proto = line[0].strip() state = line[5].strip() if connection_stats.get(proto): if connection_stats[proto].get(state): connection_stats[proto][state] += 1 else: connection_stats[proto][state] = 1 else: connection_stats[proto] = {} connection_stats[proto][state] = 1 # establish a new variable for clarity conn_state = option if textkey == "network.connections.tcp": return float(connection_stats.get("tcp", {}).get(conn_state, 0)) elif textkey == "network.connections.tcpv6": return float( connection_stats.get("tcp6", connection_stats.get("tcpv6", {})).get( conn_state, 0 ) ) elif textkey == "network.connections.udp": return float(connection_stats.get("udp", {}).get(conn_state, 0)) elif textkey == "network.connections.udpv6": return float( connection_stats.get("udp6", connection_stats.get("udpv6", {})).get( conn_state, 0 ) ) else: self.log.error("UNKNOWN NETWORK CONNECTION TEXTKEY- %s" % textkey) return None option_parts = 
option.split(":") if len(option_parts) == 2: ip_address = option_parts[0] port = int(option_parts[1]) # default to localhost if ip not provided elif len(option_parts) == 1: ip_address = "localhost" port = int(option_parts[0]) else: self.log.info("Invalid TCP/UDP port specification: %s" % option) return None if textkey == "udp_status": ret, output = agent_util.execute_command( "netstat -unap | grep %s | grep %s" % (port, ip_address) ) self.log.debug("udp_status netstat output: %s" % output) lines = output.splitlines() header = True for line in lines: line = line.split() proto = line[0].strip() local_addr = line[3].strip() if proto == "udp": return 1 # return false return 0 else: start = time.time() success = None try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) to = config.get("tcp.timeout") timeout = to if to else DEFAULT_TIMEOUT s.settimeout(timeout) s.connect((ip_address, port)) except socket.timeout: success = False except Exception: t, e = sys.exc_info()[:2] self.log.exception("TCP Connection error: %s" % e) success = False else: s.shutdown(socket.SHUT_RDWR) s.close() success = True duration = time.time() - start if textkey == "tcp_status": return success and 1 or 0 elif textkey == "tcp_latency": return success and duration or None else: return None template.py000064400000006437151700142040006735 0ustar00""" FortiMonitor Agent Plugin Template Copyright 2023 Fortinet, Inc. All Rights Reserved. fm-ops@fortinet.com The agent comes with pre-written plugins for checking common software (MySQL, Apache, etc) and OS checks. If you want to track behavior of some software that's not included or custom written, you can write your own custom plugin that can be tracked by the agent. Place your plugin file in /usr/share/panopta-agent, so the agent will be able to find it when rebuilding metric metadata. 
""" import agent_util class AgentTemplate(agent_util.Plugin): """ The plugin name MUST end in 'Plugin' and be a subclass of the agent_util.Plugin class Normally the plugin name should match the plugin textkey without special characters """ # This textkey should be unique and be a short identifier for your custom plugin # NOTE: textkey can -not- include spaces, the only special characters allowed are # underscore and periods. We recommend using a reverse domain name prefix # to generate a unique textkey, for example com.yourdomain.yourtemplate textkey = "TEMPLATE_PLUGIN" # Human readable label for your plugin that shows in the FortiMonitor controlpanel label = "PLUGIN TEMPLATE" @classmethod def get_metadata(self, config): """ The get_metadata method specifies what data can be collected. You can also use this to check if the server has the proper dependencies and config entries to run this plugin. Each entry in the dictionary that is returned is another metric type that can be reported on by this plugin. 
""" # When the agent is able to successfully collect data, return agent_util.SUPPORTED # Other statuses include agent_util.UNSUPPORTED and agent_util.MISCONFIGURED status = agent_util.SUPPORTED # If the plugin cannot be run for any reason, include a helpful message for the user msg = None data = { # 'basic_check' is the metric textkey, these must be unique within the plugin "basic_check": { # Human readable metric name shown in the controlpanel "label": "Checks some value", # Any options (such as disk names, interface name, etc) to be passed into the check "options": None, # Status as mentioned above, normally this is used for the entire plugin # but if some metrics need additional dependencies that aren't met, the ability # to set a status only on one metric is available "status": status, # Any custom error message as mentioned above "error_message": msg, # Unit in which this metric returns data (ms, requests/sec, KB/MB/GB, etc) "unit": "Your unit", }, } return data def check(self, textkey, data, config): """ The check method actually collects the metric values. The complexity of the logic here depends entirely on what data you need to collect, how to do so is up to the developer. NOTE: ALL values must be returned as a float, or None if you're unable to collect a value for the metric. 
""" value = 0.0 return your_value tomcat.py000064400000016133151700142040006403 0ustar00import re import agent_util from plugins.tomcat_jmx import TomcatJMXPlugin import logging try: from bs4 import BeautifulSoup except: try: import BeautifulSoup except: BeautifulSoup = None from agent_util import float logger = logging.getLogger(__name__) def execute_query(config, query): username = config["username"].strip() password = config["password"].strip() url = config["console_url"] queryType = "wget -qO-" query = query % (queryType, url, username, password) ret, output = agent_util.execute_command(query) return str(output) def get_all_data(config): query = "%s %s/manager/status/all --user=%s --password=%s" output = str(execute_query(config, query)) dom = BeautifulSoup(output) def parse_item(item, prefix): res = "" for line in item.contents: if isinstance(line, basestring) and not line.strip().startswith( "Start time" ): res += "%s " % line results = {} for res in re.findall(r"((.*?):\s(.*?)\s((ms|MB|s)\s)?)", res): key = "%s/%s" % (prefix, res[1].strip()) results[key] = {"value": float(res[2]), "unit": res[4]} return results data = {} all_items = [] for heading in reversed(dom.select("h1")): heading_text = heading.string.strip('"') if heading_text == "Application list": continue if heading_text == "JVM": data[heading_text] = {} p = heading.find_next("p") data[heading_text].update(parse_item(p, "Total")) table = heading.find_next("table") if not table: continue for item in table.find("tbody").find_all("tr"): try: row = item.find_all("td") row = [i.text for i in row] mem_type = row[1].replace(" memory", "") pool = row[0] result = {} result["%s/Initial" % mem_type] = { "value": float(row[2].split(" ")[0]), "unit": row[2].split(" ")[1], } result["%s/Total" % mem_type] = { "value": float(row[3].split(" ")[0]), "unit": row[3].split(" ")[1], } result["%s/Max" % mem_type] = { "value": float(row[4].split(" ")[0]), "unit": row[4].split(" ")[1], } result["%s/Used" % mem_type] = { 
"value": float(row[5].split(" ")[0]), "unit": row[5].split(" ")[1], } result["%s/Used Percentage" % mem_type] = { "value": float(re.findall(r"\(([0-9]*)%\)", row[5])[0]), "unit": "%", } data[pool] = result except Exception: import sys _, e, _ = sys.exc_info() logging.getLogger("plugin 'tomcat'").error(str(e)) pass continue data[heading_text] = {} p = heading.find_next("p") data[heading_text].update(parse_item(p, "Total")) for item in heading.find_all_next("h2"): if item not in all_items: item["id"] = len(all_items) all_items.append(item) p = item.find_next("p") data[heading_text].update( parse_item(p, item.string.split("[")[0].strip()) ) reverse_data = {} for option, metrics in data.items(): for metric, properties in metrics.items(): metric = metric[0].upper() + metric[1:] if metric not in reverse_data: reverse_data[metric] = {"unit": properties["unit"], "options": {}} reverse_data[metric]["options"][option] = properties["value"] return reverse_data class TomcatPlugin(agent_util.Plugin): textkey = "tomcat" label = "Tomcat" @classmethod def get_metadata(self, config): if ( "username" in config and "console_url" in config and "password" in config ): # Tomcat via wget Plugin status = agent_util.SUPPORTED msg = None # check for tomcat configuration block if ( "username" not in config or "console_url" not in config or "password" not in config or BeautifulSoup == None ): self.log.info("tomcat is not configured") status = agent_util.MISCONFIGURED msg = "tomcat is not configured properly" return {} # check if tomcat is even installed or running ret, output = agent_util.execute_command( "wget -qO- %s" % config["console_url"] ) if config.get("debug", False): self.log.debug("#####################################################") self.log.debug( "Tomcat command 'wget -qO- %s' output:" % config["console_url"] ) self.log.debug(str(output)) self.log.debug("#####################################################") if ret != 0: self.log.info("tomcat is not running or installed") 
status = agent_util.UNSUPPORTED msg = "tomcat not found" return {} data = {} for metric, properties in get_all_data(config).items(): data[metric] = { "label": metric, "options": sorted(properties["options"].keys()), "status": status, "error_message": msg, "unit": properties["unit"] or None, } return data elif "host" in config and "port" in config: # Tomcat via JMX Plugin return TomcatJMXPlugin.get_metadata(config) self.log.error( ( "tomcat is not configured: you must have either a set" " of [username, console_url, password] or [host, port]" " configured in your tomcat application block." ) ) return {} def check(self, textkey, data, config): if ( "username" in config and "console_url" in config and "password" in config ): # Tomcat via wget Plugin try: return get_all_data(config)[textkey]["options"][data] except: return 0 elif "host" in config and "port" in config: # Tomcat via JMX Plugin return TomcatJMXPlugin.check(textkey, data, config) self.log.error( ( "tomcat is not configured: you must have either a set" " of [username, console_url, password] or [host, port]" " configured in your tomcat application block." 
) ) return 0 tomcat_jmx.py000064400000046160151700142040007264 0ustar00import logging import agent_util import jpype from jpype import java, javax logger = logging.getLogger(__name__) class TomcatJMXPlugin(agent_util.Plugin): """Tomcat Plugin for the FortiMonitor Agent using JMX to collect data.""" textkey = "tomcat_jmx" label = "Tomcat (JMX)" JMX_MAPPING = { # Memory "memory.heap": ( "Heap Memory Usage Used", "java.lang", "Memory", None, "HeapMemoryUsage", "used", "bytes", ), "memory.heap.committed": ( "Heap Memory Usage Committed", "java.lang", "Memory", None, "HeapMemoryUsage", "committed", "bytes", ), "memory.heap.init": ( "Heap Memory Usage Init", "java.lang", "Memory", None, "HeapMemoryUsage", "init", "bytes", ), "memory.heap.max": ( "Heap Memory Usage Max", "java.lang", "Memory", None, "HeapMemoryUsage", "max", "bytes", ), "memory.non_heap": ( "Non-Heap Memory Usage Used", "java.lang", "Memory", None, "NonHeapMemoryUsage", "used", "bytes", ), "memory.non_heap.committed": ( "Non-Heap Memory Usage Committed", "java.lang", "Memory", None, "NonHeapMemoryUsage", "committed", "bytes", ), "memory.non_heap.init": ( "Non-Heap Memory Usage Init", "java.lang", "Memory", None, "NonHeapMemoryUsage", "init", "bytes", ), "memory.non_heap.max": ( "Non-Heap Memory Usage Max", "java.lang", "Memory", None, "NonHeapMemoryUsage", "max", "bytes", ), # Threading "threading.count": ( "Thread Count", "java.lang", "Threading", None, "ThreadCount", None, "count", ), # OS "os.cpu_load.process": ( "OS Process CPU Load", "java.lang", "OperatingSystem", None, "ProcessCpuLoad", None, "percent", ), "os.cpu_load.system": ( "OS System CPU Load", "java.lang", "OperatingSystem", None, "SystemCpuLoad", None, "percent", ), "os.open_file_descriptors": ( "OS Open File Descriptor Count", "java.lang", "OperatingSystem", None, "OpenFileDescriptorCount", None, "count", ), # Class loading "class_loading.loaded_classes": ( "Loaded Class Count", "java.lang", "ClassLoading", None, "LoadedClassCount", None, 
"count", ), # MemoryPool "memory_pool.eden": ( "Eden Space", "java.lang", "MemoryPool", "Eden Space", "Usage", "used", "bytes", ), "memory_pool.eden.ps": ( "PS Eden Space", "java.lang", "MemoryPool", "PS Eden Space", "Usage", "used", "bytes", ), "memory_pool.eden.par": ( "Par Eden Space", "java.lang", "MemoryPool", "Par Eden Space", "Usage", "used", "bytes", ), "memory_pool.eden.g1": ( "G1 Eden Space", "java.lang", "MemoryPool", "G1 Eden Space", "Usage", "used", "bytes", ), "memory_pool.survivor": ( "Survivor Space", "java.lang", "MemoryPool", "Survivor Space", "Usage", "used", "bytes", ), "memory_pool.survivor.ps": ( "PS Survivor Space", "java.lang", "MemoryPool", "PS Survivor Space", "Usage", "used", "bytes", ), "memory_pool.survivor.par": ( "Par Survivor Space", "java.lang", "MemoryPool", "Par Survivor Space", "Usage", "used", "bytes", ), "memory_pool.survivor.g1": ( "G1 Survivor Space", "java.lang", "MemoryPool", "G1 Survivor Space", "Usage", "used", "bytes", ), "memory_pool.old.ps": ( "PS Old Gen", "java.lang", "MemoryPool", "PS Old Gen", "Usage", "used", "bytes", ), "memory_pool.old.cms": ( "CMS Old Gen", "java.lang", "MemoryPool", "CMS Old Gen", "Usage", "used", "bytes", ), "memory_pool.old.g1": ( "G1 Old Gen", "java.lang", "MemoryPool", "G1 Old Gen", "Usage", "used", "bytes", ), # Garbage Collector "gc.young.copy": ( "Copy", "java.lang", "GarbageCollector", "Copy", "CollectionCount", None, "count", ), "gc.young.ps_scavenge": ( "PS Scavenge", "java.lang", "MemoryPool", "PS Scavenge", "CollectionCount", None, "count", ), "gc.young.par_new": ( "ParNew", "java.lang", "GarbageCollector", "ParNew", "CollectionCount", None, "count", ), "gc.young.g1_generation": ( "G1 Young Generation", "java.lang", "GarbageCollector", "G1 Young Generation", "CollectionCount", None, "count", ), "gc.mixed.g1_generation": ( "G1 Mixed Generation", "java.lang", "GarbageCollector", "G1 Mixed Generation", "CollectionCount", None, "count", ), "gc.old.mark_sweep_compact": ( 
"MarkSweepCompact", "java.lang", "GarbageCollector", "MarkSweepCompact", "CollectionCount", None, "count", ), "gc.old.ps_mark_sweep": ( "PS MarkSweep", "java.lang", "GarbageCollector", "PS MarkSweep", "CollectionCount", None, "count", ), "gc.old.concurrent_mark_sweep": ( "ConcurrentMarkSweep", "java.lang", "GarbageCollector", "ConcurrentMarkSweep", "CollectionCount", None, "count", ), "gc.old.g1_generation": ( "G1 Old Generation", "java.lang", "GarbageCollector", "G1 Old Generation", "CollectionCount", None, "count", ), } @staticmethod def __get_object_name_from_tuple(tuple_): """returns a constructed ObjectName. :type tuple_: tuple (label, domain, type, bean_name, attribute_name, composite_data_key, unit) :param tuple_: A tuple with all the information for an ObjectName. A string that represents the label, a string that represents the domain, and so on and so forth. :rtype: javax.management.ObjectName :return: An ObjectName object that can be used to lookup a MBean. """ domain, type_, bean_name = tuple_[1], tuple_[2], tuple_[3] canonical_name = "%s:" % domain if bean_name: canonical_name += "name=%s," % bean_name if type_: canonical_name += "type=%s" % type_ return javax.management.ObjectName(canonical_name) @classmethod def get_connections_from_config(cls, config): """ Parse the config object to build a structure of connections parameters based on the number of entries that are in each key. The main parameter we base on to split off is host. :type config: dict (host, port, username, password, jvm_path) :param config: Dictionary with the information stored in the config file. :rtype: Dict :return: Dictionary with connection information split up in multiple if needed. 
""" keys = ["host", "port", "username", "password", "jvm_path"] data = {} for key in keys: key_value = config.get(key) if not key_value and key not in ("username", "password"): raise ValueError("Missing %s information from config" % key) elif not key_value and key in ("username", "password"): # Username and password are not required continue else: values = [value.strip(" ") for value in key_value.split(",")] data[key] = values connections = {} hosts = data["host"] for index, host in enumerate(hosts): connections[index] = { "host": host, } for key in ["port", "username", "password", "jvm_path"]: if len(data.get(key, [])) > 1: # Multiple entries in this config, use the index to apply. value = data[key][index] elif len(data.get(key, [])) == 1: value = data[key][0] elif key not in ("username", "password"): raise ValueError("Missing %s information from config" % (key)) else: # Username and password can be skipped continue connections[index][key] = value return connections @classmethod def __get_connection(cls, config): """ returns a list of connections from the jpype library - a python interface to the Java Native Interface. Wheter there are 1 or many connections depends on the number of entries in the host, port and optionally username/password/jvm entries. :type config: dict :param config: Mapping of information under the application block for this plugin. :rtype: tuple (status, connection, error_message) :return: A tuple containing a numeric value corresponding to the agent_util status'. A MBeanServerConnection object. And, a string with an error message if any. """ status = agent_util.SUPPORTED msg = None # Check that we have an agent config if not config: msg = "No JMX configuration found" cls.log.info(msg) status = agent_util.MISCONFIGURED # Make sure at least host and port are in the config if "host" not in config or "port" not in config: msg = ( "Missing value in the [%s] block of the agent config file" " (e.g host, port)." 
) % cls.textkey cls.log.info(msg) status = agent_util.MISCONFIGURED # Try and get the jvm path from the config jvm_path = config.get("jvm_path") # If we can't find it then try and use the default if not jvm_path: try: jvm_path = jpype.getDefaultJVMPath() if not jvm_path: status = agent_util.MISCONFIGURED msg = ( "Unable to find JVM, please specify 'jvm_path' in" " the [%s] block of the agent config file." ) % cls.textkey cls.log.info(msg) except: status = agent_util.MISCONFIGURED msg = ( "Unable to find JVM, please specify 'jvm_path' in the" " [%s] block of the agent config file." ) % cls.textkey cls.log.info(msg) try: # If the JVM has not been started try and start it if status is agent_util.SUPPORTED and not jpype.isJVMStarted(): jpype.startJVM(jvm_path) except: status = agent_util.MISCONFIGURED msg = "Unable to access JMX metrics because JVM cannot be started." cls.log.info(msg) if status is agent_util.SUPPORTED: try: # Start the JVM if its not started # XXX: Redundant logic - is this necessary? if not jpype.isJVMStarted(): jpype.startJVM(jvm_path) j_hash = java.util.HashMap() # If we have a username and password use it as our credentials if config.get("username") and config.get("password"): j_array = jpype.JArray(java.lang.String)( [config.get("username"), config.get("password")] ) j_hash.put( javax.management.remote.JMXConnector.CREDENTIALS, j_array ) url = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi" % ( config.get("host"), int(config.get("port")), ) jmx_url = javax.management.remote.JMXServiceURL(url) jmx_soc = javax.management.remote.JMXConnectorFactory.connect( jmx_url, j_hash ) connection = jmx_soc.getMBeanServerConnection() return status, connection, None except Exception: msg = ( "Unable to access JMX metrics, JMX is not running or not installed." ) cls.log.exception(msg) return status, None, msg @classmethod def get_metadata(cls, config): """returns a json object who's textkeys correspond to a given metric available on the JVM. 
:type config: dict :param config: Mapping of information under the application block for this plugin. :return: JSON Object for all metrics """ result = {} configs = cls.get_connections_from_config(config) connections = {} errors = [] for entry in configs.values(): status, connection, msg = cls.__get_connection(entry) connection_key = "%s:%s" % (entry["host"], entry["port"]) if msg: errors.append("%s %s" % (connection_key, msg)) else: connections[connection_key] = connection if not connections.keys(): cls.log.info("Unable to connect to any connection") for msg in errors: cls.log.error(msg) return result else: status = agent_util.SUPPORTED msg = "" for error in errors: cls.log.warning(error) for key, tuple_ in cls.JMX_MAPPING.items(): object_name = cls.__get_object_name_from_tuple(tuple_) # Check to see if the object exists, if it doesnt we will throw # an error which will be handled silently by continuing through # the for loop. options = [] for connection_key, connection in connections.items(): try: connection.getObjectInstance(object_name) options.append(connection_key) except Exception: cls.log.exception( "Tomcat (JMX) plugin - %s bean not found at %s." % (object_name, connection_key) ) continue if len(connections.keys()) >= 1 and not options: # No connection was able to get this value. Set it to unsupported. options = None status = agent_util.UNSUPPORTED msg = "Unreachable %s at any connection" % key else: # We found options. Is supported. msg = "" status = agent_util.SUPPORTED label, unit = tuple_[0], tuple_[6] result[key] = { "label": label, "options": options, "status": status, "error_message": msg, "unit": unit, } return result @classmethod def check(cls, textkey, data, config): """returns a value for the metric. :type textkey: string :param textkey: Canonical name for a metric. :type data: string :param data: Specific option to check for. :type config: dict :param config: Mapping of information under the application block for this plugin. 
:rtype: double :return: Value for a specific metric """ entries = cls.get_connections_from_config(config) if data: for entry in entries.values(): possible_match = "%s:%s" % (entry["host"], entry["port"]) if possible_match == data: config = entry else: # Default to the first configuration config = entries[0] status, connection, msg = cls.__get_connection(config) if msg: cls.log.info("Failed to get a connection: %s" % msg) return None tuple_ = cls.JMX_MAPPING.get(textkey) attribute_name, composite_data_key = tuple_[4], tuple_[5] # Create an ObjectName object to lookup object_name = cls.__get_object_name_from_tuple(tuple_) try: object_instance = connection.getObjectInstance(object_name) attribute_value = connection.getAttribute( object_instance.getObjectName(), attribute_name ) attribute_class_name = attribute_value.__class__.__name__ # If the object returned is just a numeric value if "CompositeDataSupport" not in attribute_class_name: return attribute_value.floatValue() # If the attribute object does not have the composite data key # return none if not attribute_value.containsKey(composite_data_key): return None # If the object returned is of type CompositeDataSupport return the # correct value from that object check_result = attribute_value.get(composite_data_key) return check_result.floatValue() except: cls.log.info("Tomcat (JMX) plugin - %s bean not found." 
% object_name) return 0 unbound_dns.py000064400000011174151700142040007432 0ustar00import os try: import json except ImportError: import simplejson as json import agent_util import datetime def get_unbound_stats(): unbound_stats = {} unbound_binary = agent_util.which("unbound-control") ret, out = agent_util.execute_command("%s stats" % unbound_binary) if ret != 0: print("ERROR RUNNING UNBOUND: %s" % out) return {"error": str(out)}, False tmp = out.strip().split("\n") for line in tmp: k, v = line.split("=") unbound_stats[k] = float(v) return unbound_stats, True def get_unbound_options(unbound_stats): options = [] for opt in unbound_stats.keys(): name = opt.split(".")[0] if name == "time": continue options.append(name) return list(set(options)) class UnboundDNS(agent_util.Plugin): """ WARNING: You MUST SET AT LEAST 755 ON '/etc/unbound/unbound_control.*' AND '/etc/unbound/unbound_server.*' FOR THIS PLUGIN TO WORK PROPERLY """ textkey = "unbound_dns" label = "Unbound DNS Resolver" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None unbound_options = ["total"] # check to confirm unbound is installed unbound_installed = agent_util.which("unbound-control") if not unbound_installed: status = agent_util.UNSUPPORTED msg = "Command unbound-control not installed" return {} else: unbound_stats, passed = get_unbound_stats() if passed is False: self.log.error("ERROR with Unbound:\n%s" % unbound_stats["error"]) stats = agent_util.MISCONFIGURED msg = unbound_stats["error"] else: self.log.debug("Found unbound stats!\n%s" % unbound_stats) unbound_options = get_unbound_options(unbound_stats) self.log.error( "Found %s unbound DNS keys: %s" % (len(unbound_options), unbound_options) ) metadata = { "num.queries": { "label": "Number of queries", "options": unbound_options, "status": status, "error_message": msg, "unit": "", }, "num.cachehits": { "label": "Number of cache hits", "options": unbound_options, "status": status, "error_message": msg, "unit": 
"", }, "num.cachemiss": { "label": "Cache misses", "options": unbound_options, "status": status, "error_message": msg, "unit": "", }, "num.recursivereplies": { "label": "Recursive replies", "options": unbound_options, "status": status, "error_message": msg, "unit": "", }, "requestlist.avg": { "label": "Average queued queries", "options": unbound_options, "status": status, "error_message": msg, "unit": "", }, "requestlist.max": { "label": "Max queued queries", "options": unbound_options, "status": status, "error_message": msg, "unit": "", }, "recursion.time.avg": { "label": "Average recursion time", "options": unbound_options, "status": status, "error_message": msg, "unit": "seconds", }, "recursion.time.median": { "label": "Median recursion time", "options": unbound_options, "status": status, "error_message": msg, "unit": "seconds", }, "time.up": { "label": "Service uptime", "options": None, "status": status, "error_message": msg, "unit": "seconds", }, } return metadata def check(self, textkey, data, config): unbound_stats, passed = get_unbound_stats() self.log.debug(unbound_stats) if data is None: key = textkey else: key = "%s.%s" % (str(data), str(textkey)) self.log.debug(str(agent_util.which("unbound-control"))) self.log.debug("Checking for Unbound key %s" % key) return unbound_stats.get(key, None) uptime.py000064400000007154151700142040006422 0ustar00import os try: import json except ImportError: import simplejson as json import agent_util import datetime import time import sys from agent_util import float class UptimePlugin(agent_util.Plugin): textkey = "uptime" label = "Machine Uptime" platform = sys.platform @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if "sunos" in self.platform: psr = agent_util.which("psrinfo", exc=True) if not psr: status = agent_util.MISCONFIGURED msg = "Unable to collect uptime data, please make sure psrinfo is installed." 
else: who = agent_util.which("who", exc=True) if not who: status = agent_util.MISCONFIGURED msg = ( "Unable to collect uptime data, please make sure Who is installed." ) metadata = { "time_since_last_boot": { "label": "Time since last boot", "options": None, "status": status, "error_message": msg, "unit": "minutes", }, } return metadata def check(self, textkey, data, config): if textkey == "time_since_last_boot": lb = "" if "sunos" in self.platform: psrinfo = agent_util.which("psrinfo", exc=True) retcode, output = agent_util.execute_command(psrinfo) lb = output.strip() lb = lb.split()[-2:] lb = " ".join(lb) lb = datetime.datetime.strptime(lb, "%m/%d/%Y %H:%M:%S") elif "darwin" in self.platform: sysctl = agent_util.which("sysctl", exc=True) retcode, output = agent_util.execute_command( "%s -n kern.boottime" % sysctl ) fields = output.strip().split() return time.time() - int(fields[3].strip(",")) elif "aix" in self.platform: uptime = agent_util.which("uptime", exc=True) retcode, output = agent_util.execute_command(uptime) self.log.debug("####\n%s" % str(output)) lb = re.sub(",|days|day", "", str(output)) self.log.debug("####\n%s" % str(lb)) if "day" in output: if "min" in output: lb = re.split(" ", str(lb))[4:7] lb = datetime.timedelta(days=int(lb[0]), minutes=int(lb[-1])) elif "hr" in output: lb = re.split(" ", str(lb))[4:7] lb = datetime.timedelta(days=int(lb[0]), hours=int(lb[-1])) else: lb = re.split(" |:", str(lb))[5:10] lb = datetime.timedelta( days=int(lb[0]), hours=int(lb[-2]), minutes=int(lb[-1]) ) else: if "min" in output: lb = re.split(" ", str(lb))[4] lb = datetime.timedelta(minutes=int(lb)) elif "hr" in output: lb = re.split(" ", str(lb))[4] lb = datetime.timedelta(hours=int(lb)) else: return 60 else: uptime = open("/proc/uptime").read().split()[0] return int(float(uptime)) ct = datetime.datetime.now() diff = ct - lb return int(diff.total_seconds()) return 0 users.py000064400000002130151700142040006245 0ustar00import agent_util import logging logger = 
logging.getLogger(__name__) class UsersPlugin(agent_util.Plugin): textkey = "users" label = "Users" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None options = [] data = { "users.unique_login_count": { "label": "Users unique login count", "options": options, "status": status, "error_message": msg, }, "users.total_login_count": { "label": "Users total login count", "options": options, "status": status, "error_message": msg, }, } return data def check(self, textkey, data, config): query = "" if textkey == "users.unique_login_count": query = "who |cut -c 1-9 |sort -u |wc -l" if textkey == "users.total_login_count": query = "who | wc -l" ret, output = agent_util.execute_command("%s" % query) return int(output) uwsgi.py000064400000011347151700142040006254 0ustar00import agent_util import logging logger = logging.getLogger(__name__) class UWSGIPlugin(agent_util.Plugin): textkey = "uwsgi" label = "UWSGI" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if uwsgi is even installed installed = agent_util.which("uwsgi") if not installed: self.log.info("The uwsgi binary was not found.") status = agent_util.UNSUPPORTED msg = "uwsgi binary not found" return {} if not config: msg = "The [uwsgi] config block is not found in the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and ( not "server" in config or not "port" in config ): msg = "The server and port settings were not found in the [uwsgi] block of the agent config file." 
self.log.info(msg) status = agent_util.MISCONFIGURED workers = [] if status is agent_util.SUPPORTED: try: status, output = agent_util.execute_command( "nc %s %s" % (config["server"], config["port"]) ) if status != 0: raise Exception(output) output = agent_util.json_loads(output) for worker in output["workers"]: workers.append("Worker #%s" % worker["pid"]) workers.sort() except: status = agent_util.MISCONFIGURED msg = "Unable to get uwsgi status, please make sure uwsgi is running with the status module enabled and that the connection settings in the agent configuration file are valid." self.log.info(msg) if status == agent_util.SUPPORTED and not workers: status = agent_util.MISCONFIGURED msg = "No workers found." metadata = { "requests": { "label": "Requests per second", "options": workers, "status": status, "error_message": msg, "unit": "requests", }, "exceptions": { "label": "Exceptions", "options": workers, "status": status, "error_message": msg, "unit": "exceptions", }, "status": { "label": "Worker is busy or free to use(BUSY=1, FREE=0)", "options": workers, "status": status, "error_message": msg, }, "rss": { "label": "Worker RSS (Resident Set Size)", "options": workers, "status": status, "error_message": msg, }, "vsz": { "label": "Worker VSZ (Virtual Memory Size)", "options": workers, "status": status, "error_message": msg, }, "running_time": { "label": "How long worker is working", "options": workers, "status": status, "error_message": msg, "unit": "workers", }, "respawn_count": { "label": "How many requests worker did since worker (re)spawn", "options": workers, "status": status, "error_message": msg, "unit": "requests", }, "tx": { "label": "How many data was transmitted by worker", "options": workers, "status": status, "error_message": msg, "unit": "data", }, "avg_rt": { "label": "Average request time", "options": workers, "status": status, "error_message": msg, "unit": "ms", }, } return metadata def check(self, textkey, worker, config): status, output = 
agent_util.execute_command( "nc %s %s" % (config["server"], config["port"]) ) if status != 0: raise Exception(output) output = agent_util.json_loads(output) workers = output["workers"] pid = worker.replace("Worker #", "") for w in workers: if w["pid"] == int(pid): worker = w break try: res = worker[textkey] if textkey == "status": if res == "idle": res = 0 else: res = 1 except: res = 0 return res varnish.py000064400000011643151700142040006567 0ustar00import agent_util import logging logger = logging.getLogger(__name__) def execute_query(query): ret, output = agent_util.execute_command(query) return str(output) class VarnishPlugin(agent_util.Plugin): textkey = "varnish" label = "Varnish" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if varnish is even installed or running installed = agent_util.which("varnishd") if not installed: self.log.info("The varnishd binary was not found") status = agent_util.UNSUPPORTED msg = "varnish binary not found" return {} stat_binary_path = agent_util.which("varnishstat") if not stat_binary_path: msg = "The varnishstat binary was not found." status = agent_util.MISCONFIGURED self.log.info(msg) data = { "client_conn": { "label": "Client connections accepted", "options": None, "status": status, "error_message": msg, }, "client_req": { "label": "Client requests received", "options": None, "status": status, "error_message": msg, }, "cache_hit": { "label": "Cache hits", "options": None, "status": status, "error_message": msg, }, "cache_hitpass": { "label": "Cache hits for pass", "options": None, "status": status, "error_message": msg, }, "backend_fail": { "label": "Backend conn. 
failures", "options": None, "status": status, "error_message": msg, }, "cache_miss": { "label": "Cache misses", "options": None, "status": status, "error_message": msg, }, "n_object": { "label": "Nstruct object", "options": None, "status": status, "error_message": msg, }, "n_wrk": { "label": "Nworker threads", "options": None, "status": status, "error_message": msg, }, "n_wrk_create": { "label": "Nworker threads created", "options": None, "status": status, "error_message": msg, }, "n_wrk_failed": { "label": "Nworker threads not created", "options": None, "status": status, "error_message": msg, }, "n_wrk_max": { "label": "Nworker threads limited", "options": None, "status": status, "error_message": msg, }, "n_wrk_drop": { "label": "Ndropped work requests", "options": None, "status": status, "error_message": msg, }, "n_lru_nuked": { "label": "NLRU nuked objects", "options": None, "status": status, "error_message": msg, }, "esi_errors": { "label": "ESI parse errors (unlock)", "options": None, "status": status, "error_message": msg, }, "n_expired": { "label": "Nexpired objects", "options": None, "status": status, "error_message": msg, }, } if "extended_metrics" in config: extended_metrics = [ m.strip().lower() for m in config["extended_metrics"].split(",") ] for m in extended_metrics: data["extended_metric.%s" % m] = { "label": "Varnish %s" % m.replace("_", " "), "options": None, "status": status, "error_message": msg, } return data def check(self, textkey, data, config): stat_binary_path = agent_util.which("varnishstat") query = "%s -1 -f %s" if "extended_metric" in textkey: result = execute_query( query % (stat_binary_path, textkey.replace("extended_metric.", "")) ) else: result = execute_query(query % (stat_binary_path, textkey)) fields = result.split() try: return int(fields[1]) except: return 0 self.log.debug( "%s: %s" % (textkey.replace("extended_metric.", "").title(), str(result)) ) weblogic.py000064400000033531151700142040006710 0ustar00import agent_util import 
os metrics = { # Work Manager Metrics "work_manager": { "type": "WorkManagerRuntime", "metrics": { "completed_requests": { "label": "The number of requests that have been processed", "property": "CompletedRequests", }, "pending_requests": { "label": "The number of waiting requests in the queue", "property": "PendingRequests", }, "stuck_thread_count": { "label": "The number of threads that are considered to be stuck on the basis of any stuck thread constraints", "property": "StuckThreadCount", }, }, }, # JDBC Data Source Metrics "jdbc_datasource": { "type": "JDBCDataSourceRuntime", "metrics": { "active_connections_current_count": { "label": "The number of connections currently in use by applications", "property": "ActiveConnectionsCurrentCount", } }, }, # EJB Cache Metrics "ejb_cache": { "type": "EJBCacheRuntime", "metrics": { "activation_count": { "label": "Work Manager: The total number of beans from this EJB Home that have been activated", "property": "ActivationCount", }, "cache_access_count": { "label": "Work Manager: The total number of attempts to access a bean from the cache", "property": "CacheAccessCount", }, "cache_beans_current_count": { "label": "Work Manager: The total number of beans from this EJB Home currently in the EJB cache", "property": "CachedBeansCurrentCount", }, "cache_hit_count": { "label": "Work Manager: The total number of times an attempt to access a bean from the cache succeeded", "property": "CacheHitCount", }, "cache_miss_count": { "label": "Work Manager: The total number of times an attempt to access a bean from the cache failed", "property": "CacheMissCount", }, "passivation_count": { "label": "Work Manager: The total number of beans from this EJB Home that have been passivated", "property": "PassivationCount", }, }, }, # EJB Pool Metrics "ejb_pool": { "type": "EJBPoolRuntime", "metrics": { "access_total_count": { "label": "EJB Pool: The total number of times an attempt was made to get an instance from the free pool", "property": 
"AccessTotalCount", }, "beans_in_use_count": { "label": "EJB Pool: The total number of bean instances currently in use from the free pool", "property": "BeansInUseCount", }, "beans_in_use_current_count": { "label": "EJB Pool: The number of bean instances currently being used from the free pool", "property": "BeansInUseCurrentCount", }, "destroyed_total_count": { "label": "EJB Pool: The total number of times a bean instance from this pool was destroyed due to a non-application Exception being thrown from it", "property": "DestroyedTotalCount", }, "idle_beans_count": { "label": "EJB Pool: The total number of available bean instances in the free pool", "property": "IdleBeansCount", }, "miss_total_count": { "label": "EJB Pool: The total number of times a failed attempt was made to get an instance from the free pool", "property": "MissTotalCount", }, "pooled_beans_current_count": { "label": "EJB Pool: The current number of available bean instances in the free pool", "property": "PooledBeansCurrentCount", }, "timeout_total_count": { "label": "EJB Pool: The total number of Threads that have timed out waiting for an available bean instance from the free pool", "property": "TimeoutTotalCount", }, "waiter_current_count": { "label": "EJB Pool: The number of Threads currently waiting for an available bean instance from the free pool", "property": "WaiterCurrentCount", }, "waiter_total_count": { "label": "EJB Pool: The total number of Threads currently waiting for an available bean instance from the free pool", "property": "WaiterTotalCount", }, }, }, # EJB Transaction Metrics "ejb_transaction": { "type": "EJBTransactionRuntime", "metrics": { "transactions_committed_total_count": { "label": "EJB Transaction: The total number of transactions that have been committed for this EJB", "property": "TransactionsCommittedTotalCount", }, "transactions_rolled_back_total_count": { "label": "EJB Transaction: The total number of transactions that have been rolled back for this EJB", 
"property": "TransactionsRolledBackTotalCount", }, "transactions_timeout_total_count": { "label": "EJB Transaction: The total number of transactions that have timed out for this EJB", "property": "TransactionsTimedOutTotalCount", }, }, }, # Executive Queue Metrics "executive_queue": { "type": "ExecuteQueueRuntime", "metrics": { "execute_thread_current_idle_count": { "label": "Executive Queue: The number of idle threads assigned to the queue", "property": "ExecuteThreadCurrentIdleCount", }, "execute_thread_total_count": { "label": "Executive Queue: The total number of execute threads assigned to the queue", "property": "ExecuteThreadTotalCount", }, "pending_request_current_count": { "label": "Executive Queue: The number of waiting requests in the queue", "property": "PendingRequestCurrentCount", }, "pending_request_oldest_time": { "label": "Executive Queue: The time since the longest waiting request was placed in the queue", "property": "PendingRequestOldestTime", }, "serviced_request_total_count": { "label": "Executive Queue: The number of requests that have been processed by the queue", "property": "ServicedRequestTotalCount", }, }, }, # Servlet Metrics "servlet": { "type": "ServletRuntime", "metrics": { "execution_time_average": { "label": "Servlet: The average amount of time all invocations of the servlet have executed since created", "property": "ExecutionTimeAverage", }, "execution_time_high": { "label": "Servlet: The amount of time the single longest invocation of the servlet has executed since created", "property": "ExecutionTimeHigh", }, "execution_time_low": { "label": "Servlet: The amount of time the single shortest invocation of the servlet has executed since created", "property": "ExecutionTimeLow", }, "execution_time_total": { "label": "Servlet: The total amount of time all invocations of the servlet have executed since created", "property": "ExecutionTimeTotal", }, "invocation_total_count": { "label": "Servlet: The total count of the times this 
servlet has been invoked", "property": "InvocationTotalCount", }, "pool_max_capacity": { "label": "Servlet: The maximum capacity of this servlet for single thread model servlets", "property": "PoolMaxCapacity", }, "reload_total_count": { "label": "Servlet: The total count of the number of times this servlet has been reloaded", "property": "ReloadTotalCount", }, }, }, # Web App Component Metrics "web_app_component": { "type": "WebAppComponentRuntime", "metrics": { "open_sessions_current_count": { "label": "Web App Component: The current total number of open sessions in this module", "property": "OpenSessionsCurrentCount", }, "open_sessions_high_count": { "label": "Web App Component: The high water mark of the total number of open sessions in this server", "property": "OpenSessionsHighCount", }, "sessions_opened_total_count": { "label": "Web App Component: The total number of sessions opened", "property": "SessionsOpenedTotalCount", }, }, }, } def get_metric(config, type, property=None): set_env_command = "" if "wl_home" in config: set_env_command = "CLASSPATH=%s " % os.path.join( config["wl_home"], "server/lib/weblogic.jar" ) cmd = set_env_command + "java weblogic.Admin" if "username" in config: cmd += " -username %s" % config["username"] if "password" in config and config["password"].strip(): cmd += " -password %s" % config["password"].strip() cmd += " GET -pretty -type %s" % type if property: cmd += " -property %s" % property status, output = agent_util.execute_command(cmd) if status != 0: raise Exception(output) output = output.strip().split("\n")[-1] if not property: if output == "No MBeans found": raise Exception(output) else: return output else: parsed_output = output[output.index(":") + 1 :].strip() return parsed_output class WeblogicPlugin(agent_util.Plugin): textkey = "weblogic" label = "Oracle WebLogic Webserver" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if oracle weblogic is installed, running, and on 
path set_env_command = "" if "wl_home" in config: set_env_command = "CLASSPATH=%s " % os.path.join( config["wl_home"], "server/lib/weblogic.jar" ) installed = False if agent_util.which("java"): cmd = set_env_command + "java weblogic.Admin" if "username" in config: cmd += " -username %s" % config["username"] if "password" in config and config["password"].strip(): cmd += " -password %s" % config["password"].strip() status, output = agent_util.execute_command(cmd) if status == 0: installed = True if not installed: self.log.info("Oracle weblogic was not found installed or not on path") status = agent_util.UNSUPPORTED return {} if not config: msg = "The [weblogic] configuration block was not found in the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and not ( "wl_home" in config and "username" in config and "password" in config ): msg = "Weblogic configuration parameters missing from the [weblogic] block of the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and not os.path.exists( os.path.join(config["wl_home"], "server/bin/setWLSEnv.sh") ): msg = "Weblogic setWLSEnv.sh script not found" self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: try: output = get_metric(config, "ApplicationRuntime") except: self.log.exception("error getting weblogic metric") status = agent_util.MISCONFIGURED msg = "Unable to access Weblogic metrics. Please double check your Weblogic login username and password as well as wl_home in the agent config file." 
data = {} for type, vals in metrics.items(): try: output = get_metric(config, vals["type"]) except: continue for property, meta in vals["metrics"].items(): textkey = "%s.%s" % (type, property) data[textkey] = { "label": meta["label"][:100], "options": meta.get("options", None), "status": status, "error_message": msg, } if "unit" in meta: data[textkey]["unit"] = meta["unit"] if not data: data = { "servlet": { "label": "WebLogic Servlets", "options": None, "status": status, "error_message": msg, } } return data def check(self, textkey, data, config): try: type, property = textkey.split(".") except: return False if type not in metrics or property not in metrics[type]["metrics"]: return False val = int( get_metric( config, metrics[type]["type"], metrics[type]["metrics"][property]["property"], ) ) self.log.debug("%s: %d" % (textkey, val)) return val weblogic12c.py000064400000046314151700142040007221 0ustar00import agent_util import os try: from jpype import java, javax import jpype except: jpype = None import traceback from agent_util import float metrics = { # Work Manager Metrics "work_manager": { "domain": "com.bea", "type": "WorkManagerRuntime", "option_key": "Name", "metrics": { "completed_requests": { "label": "Requests completed", "property": "CompletedRequests", "unit": "requests", }, "pending_requests": { "label": "Requests pending", "property": "PendingRequests", "unit": "requests", }, "stuck_thread_count": { "label": "Threads stuck", "property": "StuckThreadCount", "unit": "threads", }, }, }, # JDBC Data Source Metrics "jdbc_datasource": { "domain": "com.bea", "type": "JDBCDataSourceRuntime", "metrics": { "active_connections_current_count": { "label": "JDBC active connections", "property": "ActiveConnectionsCurrentCount", "unit": "connections", } }, }, # EJB Pool Metrics "ejb_pool": { "domain": "com.bea", "type": "EJBPoolRuntime", "metrics": { "access_total_count": { "label": "EJB pool access count", "property": "AccessTotalCount", "unit": "accesses", }, 
"beans_in_use_current_count": { "label": "EJB pool beans in use", "property": "BeansInUseCurrentCount", "unit": "beans", }, "destroyed_total_count": { "label": "EJB pool beans destroyed", "property": "DestroyedTotalCount", "unit": "beans", }, "idle_beans_count": { "label": "EJB pool idle beansl", "property": "IdleBeansCount", "unit": "beans", }, "miss_total_count": { "label": "EJB pool miss count", "property": "MissTotalCount", "unit": "attempts", }, "pooled_beans_current_count": { "label": "EJB pool available bean instances", "property": "PooledBeansCurrentCount", "unit": "beans", }, "timeout_total_count": { "label": "EJB pool thread timeouts", "property": "TimeoutTotalCount", "unit": "threads", }, "waiter_current_count": { "label": "EJB pool threads waiting", "property": "WaiterCurrentCount", "unit": "threads", }, }, }, # EJB Transaction Metrics "ejb_transaction": { "domain": "com.bea", "type": "EJBTransactionRuntime", "metrics": { "transactions_committed_total_count": { "label": "EJB transactions committed", "property": "TransactionsCommittedTotalCount", "unit": "transactions", }, "transactions_rolled_back_total_count": { "label": "EJB transactions rolled back", "property": "TransactionsRolledBackTotalCount", "unit": "transactions", }, "transactions_timeout_total_count": { "label": "EJB transactions timed out", "property": "TransactionsTimedOutTotalCount", "unit": "transactions", }, }, }, # Executive Queue Metrics "executive_queue": { "domain": "com.bea", "type": "ExecuteQueueRuntime", "metrics": { "execute_thread_current_idle_count": { "label": "Execute queue idle threads", "property": "ExecuteThreadCurrentIdleCount", "unit": "threads", }, "execute_thread_total_count": { "label": "Execute queue total threads", "property": "ExecuteThreadTotalCount", "unit": "threads", }, "pending_request_current_count": { "label": "Execute queue waiting requests", "property": "PendingRequestCurrentCount", "unit": "requests", }, "pending_request_oldest_time": { "label": "Execute 
queue oldest waiting request age", "property": "PendingRequestOldestTime", "unit": "milliseconds", }, "serviced_request_total_count": { "label": "Execute queue requests processed", "property": "ServicedRequestTotalCount", "unit": "requests", }, }, }, # Servlet Metrics "servlet": { "domain": "com.bea", "type": "ServletRuntime", "option_key": "Name", "metrics": { "execution_time_average": { "label": "Servlet average execution time", "property": "ExecutionTimeAverage", "unit": "milliseconds", }, "execution_time_high": { "label": "Servlet longest execution time", "property": "ExecutionTimeHigh", "unit": "milliseconds", }, "execution_time_low": { "label": "Servlet shortest execution time", "property": "ExecutionTimeLow", "unit": "milliseconds", }, "execution_time_total": { "label": "Servlet total execution time", "property": "ExecutionTimeTotal", "unit": "seconds", "scaleby": 0.001, }, "invocation_total_count": { "label": "Servlet total invocations", "property": "InvocationTotalCount", "unit": "invocations", }, "pool_max_capacity": { "label": "Servlet maximum capacity", "property": "PoolMaxCapacity", }, "reload_total_count": { "label": "Servlet reload count", "property": "ReloadTotalCount", "unit": "reloads", }, }, }, # Web App Component Metrics "web_app_component": { "domain": "com.bea", "type": "WebAppComponentRuntime", "option_key": "Name", "metrics": { "open_sessions_current_count": { "label": "Webapp current sessions", "property": "OpenSessionsCurrentCount", "unit": "sessions", }, "open_sessions_high_count": { "label": "Webapp maximum sessions", "property": "OpenSessionsHighCount", "unit": "sessions", }, "sessions_opened_total_count": { "label": "Webapp total sessions", "property": "SessionsOpenedTotalCount", "unit": "sessions", }, }, }, # JVM stats "jvm": { "domain": "com.bea", "type": "JVMRuntime", "metrics": { "uptime": { "label": "JVM uptime", "property": "Uptime", "unit": "seconds", "scaleby": 0.001, }, "heap_percent_free": { "label": "JVM heap percent free", 
"property": "HeapFreePercent", "unit": "percent", }, "heap_free": { "label": "JVM heap free", "property": "HeapFreeCurrent", "unit": "bytes", }, }, }, # JVM threading "jvm_threading": { "domain": "java.lang", "type": "Threading", "metrics": { "jvm_thread_count_peak": { "label": "Thread count - peak", "property": "PeakThreadCount", "unit": "threads", }, "jvm_thread_count_daemon": { "label": "Thread count - daemon", "property": "DaemonThreadCount", "unit": "threads", }, "jvm_thread_count_total_started": { "label": "Thread count - total started", "property": "TotalStartedThreadCount", "unit": "threads", }, "jvm_thread_count": { "label": "Thread count", "property": "ThreadCount", "unit": "threads", }, }, }, # # JVM Compilations "jvm_compilation": { "domain": "java.lang", "type": "Compilation", "metrics": { "jvm_compilation_time": { "label": "JVM compilation time", "property": "TotalCompilationTime", "unit": "seconds", "scaleby": 0.001, } }, }, # JVM Garbage Collector "jvm_gc": { "domain": "java.lang", "type": "GarbageCollector", "option_key": "name", "metrics": { "copy_count": { "label": "JVM GC count", "property": "CollectionCount", "unit": "collections", }, "copy_time": { "label": "JVM GC time", "property": "CollectionTime", "unit": "seconds", "scaleby": 0.001, }, }, }, } class WeblogicPlugin(agent_util.Plugin): textkey = "weblogic12c" label = "Oracle WebLogic 12c" @classmethod def get_jmx_connection(self, config): """ Establish a connection to the JMX endpoint on the server """ # Bail of we don't have the Jpype library if not jpype: return None classpath = config.get("classpath") if not jpype.isJVMStarted(): if classpath: jpype.startJVM( jpype.getDefaultJVMPath(), "-Djava.class.path=%s" % classpath ) else: jpype.startJVM(jpype.getDefaultJVMPath()) # Set authentication if provided in the configuration file jhash = java.util.HashMap() if "username" in config and "password" in config: jarray = jpype.JArray(java.lang.String)( [config["username"], config["password"]] ) 
jhash.put(javax.management.remote.JMXConnector.CREDENTIALS, jarray) # Build up the JMX URL from configuration pieces protocol = config.get("protocol", "iiop") hostname = config.get("hostname", "localhost") port = config.get("port", "7001") jndi_name = config.get("jndi_name", "weblogic.management.mbeanservers.runtime") url = "service:jmx:rmi:///jndi/%s://%s:%s/%s" % ( protocol, hostname, port, jndi_name, ) # Let customers override the full JMX URL if desired url = config.get("jmx_url", url) jmxurl = javax.management.remote.JMXServiceURL(url) jmxsoc = javax.management.remote.JMXConnectorFactory.connect(jmxurl, jhash) connection = jmxsoc.getMBeanServerConnection() return connection @classmethod def get_mbean_name( self, connection, bean_domain, bean_type, option_key=None, option_value=None ): """ Get a reference to a specific MBean, referenced by the type of bean. Requires doing a search through all current beans to find the right one. """ self.log.debug( "GETTING MBEAN NAME: %s %s %s %s" % (bean_domain, bean_type, option_key, option_value) ) for obj in connection.queryNames(None, None): name = obj.toString() domain, keys = name.split(":", 1) if domain != bean_domain: continue fields = keys.split(",") properties = {} for field in fields: key, value = field.split("=") properties[key.lower()] = value # We now have a mapping of the properties of the bean name. Check to see if we have # a match for what we're looking for if option_key: if ( properties.get("type") == bean_type and properties[option_key.lower()] == option_value ): return name elif properties.get("type") == bean_type: return name elif properties.get("type") == bean_type: return name return None @classmethod def get_mbean_options(self, connection, bean_domain, bean_type, option_key): """ Get the list of options for a given MBean type. 
""" options = [] self.log.debug( "LOOKING FOR OPTIONS FOR %s %s %s" % (bean_domain, bean_type, option_key) ) for obj in connection.queryNames(None, None): name = obj.toString() domain, keys = name.split(":", 1) if domain != bean_domain: continue fields = keys.split(",") properties = {} for field in fields: key, value = field.split("=") properties[key.lower()] = value if properties["type"] == bean_type and option_key.lower() in properties: options.append(properties[option_key.lower()]) return options @classmethod def get_metric(self, connection, mbean_name, attribute_name, scaleby=1.0): """ Get a attribute value coming from a specific JMX MBean """ self.log.debug("GETTING METRIC %s %s" % (mbean_name, attribute_name)) try: value = connection.getAttribute( javax.management.ObjectName(mbean_name), attribute_name ) value = value.floatValue() * scaleby return value except: return None @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # Check if WebLogic 12c has been enabled in the agnet config file if not config: self.log.info( "The [weblogic12c] configuration block was not found in the agent config file" ) status = agent_util.UNSUPPORTED return {} # Check if the Jpype library is available if not jpype: self.log.info( "The Jpype library is not installed - see http://jpype.readthedocs.io/en/latest/ for instructions" ) status = agent_util.UNSUPPORTED return {} if status == agent_util.SUPPORTED and not "classpath" in config: msg = "Weblogic configuration parameters missing from the [weblogic] block of the agent config file - classpath must be specified." 
status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and ( "username" in config and not "password" in config ): msg = "Both username and password must be specified for WebLogic access" status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and ( "password" in config and not "username" in config ): msg = "Both username and password must be specified for WebLogic access" status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: # Make an actual call to get a value to ensure that everything works try: connection = self.get_jmx_connection(config) name = self.get_mbean_name(connection, "com.bea", "WorkManagerRuntime") value = self.get_metric(connection, name, "CompletedRequests") except: self.log.exception( "Error getting weblogic metric: %s" % traceback.format_exc() ) status = agent_util.MISCONFIGURED msg = "Unable to access Weblogic metrics. Please double check your WebLogic JMX access configuration in the agent config file." data = {} connection = self.get_jmx_connection(config) for type, vals in metrics.items(): for property, meta in vals["metrics"].items(): textkey = "%s.%s" % (type, property) data[textkey] = { "label": meta["label"][:100], "options": meta.get("options", None), "status": status, "error_message": msg, } # If the bean takes options, go off and find them based on the option key if "option_key" in vals: data[textkey]["options"] = self.get_mbean_options( connection, metrics[type]["domain"], metrics[type]["type"], metrics[type]["option_key"], ) # If the metric's unit is specified, copy that over if "unit" in meta: data[textkey]["unit"] = meta["unit"] # Check to make sure we can actually access the metric try: if "option_key" in vals and data[textkey]["options"]: name = self.get_mbean_name( connection, metrics[type]["domain"], metrics[type]["type"], vals["option_key"], data[textkey]["options"][0], ) else: name = self.get_mbean_name( connection, metrics[type]["domain"], metrics[type]["type"] ) 
self.log.debug("VERIFYING BEAN: %s" % name) value = self.get_metric(connection, name, meta["property"]) except: status = agent_util.MISCONFIGURED return data def check(self, textkey, option, config): try: type, property = textkey.split(".") except: # Invalid metric specification, can't collect anything return None if type not in metrics or property not in metrics[type]["metrics"]: return None domain = metrics[type]["domain"] metric = metrics[type]["metrics"][property]["property"] scaleby = metrics[type]["metrics"][property].get("scaleby", 1.0) connection = self.get_jmx_connection(config) if not connection: return None if option and "option_key" in metrics[type]: bean_name = self.get_mbean_name( connection, domain, metrics[type]["type"], metrics[type]["option_key"], option, ) else: bean_name = self.get_mbean_name(connection, domain, metrics[type]["type"]) return self.get_metric(connection, bean_name, metric, scaleby) __pycache__/__init__.cpython-36.pyc000064400000000170151700142040013131 0ustar003 ���i�@sdS)N�rrr�%/usr/lib/fm-agent/plugins/__init__.py�<module>s__pycache__/tomcat_jmx.cpython-36.pyc000064400000025337151700142040013553 0ustar003 ���ipL�@sHddlZddlZddlZddlmZmZeje�ZGdd�dej�Z dS)�N)�java�javaxc'@s�eZdZdZdZdZdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\d]d^d_d`dadbdcdddedfdgdhd=�!Zed>d?��Ze d@dA��Z e dBdC��Ze dDdE��Ze dFdG��Z dS)i�TomcatJMXPluginzCTomcat Plugin for the FortiMonitor Agent using JMX to collect data.Z tomcat_jmxzTomcat (JMX)�Heap Memory Usage Used� java.lang�MemoryN�HeapMemoryUsage�used�bytes�Heap Memory Usage Committed� committed�Heap Memory Usage Init�init�Heap Memory Usage Max�max�Non-Heap Memory Usage Used�NonHeapMemoryUsage�Non-Heap Memory Usage Committed�Non-Heap Memory Usage Init�Non-Heap Memory Usage Max�Thread Count� Threading�ThreadCount�count�OS Process CPU Load�OperatingSystem�ProcessCpuLoad�percent�OS System CPU Load� SystemCpuLoad�OS Open File Descriptor Count�OpenFileDescriptorCount�Loaded Class Count�ClassLoading�LoadedClassCount� 
Eden Space� MemoryPool�Usage� PS Eden Space�Par Eden Space� G1 Eden Space�Survivor Space�PS Survivor Space�Par Survivor Space�G1 Survivor Space� PS Old Gen�CMS Old Gen� G1 Old Gen�Copy�GarbageCollector�CollectionCount�PS Scavenge�ParNew�G1 Young Generation�G1 Mixed Generation�MarkSweepCompact�PS MarkSweep�ConcurrentMarkSweep�G1 Old Generation)!zmemory.heapzmemory.heap.committedzmemory.heap.initzmemory.heap.maxzmemory.non_heapzmemory.non_heap.committedzmemory.non_heap.initzmemory.non_heap.maxzthreading.countzos.cpu_load.processzos.cpu_load.systemzos.open_file_descriptorszclass_loading.loaded_classeszmemory_pool.edenzmemory_pool.eden.pszmemory_pool.eden.parzmemory_pool.eden.g1zmemory_pool.survivorzmemory_pool.survivor.pszmemory_pool.survivor.parzmemory_pool.survivor.g1zmemory_pool.old.pszmemory_pool.old.cmszmemory_pool.old.g1z gc.young.copyzgc.young.ps_scavengezgc.young.par_newzgc.young.g1_generationzgc.mixed.g1_generationzgc.old.mark_sweep_compactzgc.old.ps_mark_sweepzgc.old.concurrent_mark_sweepzgc.old.g1_generationcCsP|d|d|d}}}d|}|r4|d|7}|rD|d|7}tjj|�S)a�returns a constructed ObjectName. :type tuple_: tuple (label, domain, type, bean_name, attribute_name, composite_data_key, unit) :param tuple_: A tuple with all the information for an ObjectName. A string that represents the label, a string that represents the domain, and so on and so forth. :rtype: javax.management.ObjectName :return: An ObjectName object that can be used to lookup a MBean. ���z%s:zname=%s,ztype=%s)r� managementZ ObjectName)�tuple_�domainZtype_Z bean_nameZcanonical_name�rC�'/usr/lib/fm-agent/plugins/tomcat_jmx.pyZ__get_object_name_from_tupleBs z,TomcatJMXPlugin.__get_object_name_from_tuplecCsdddddg}i}x`|D]X}|j|�}|rB|dkrBtd|��q|rT|d krTqqdd�|jd �D�}|||<qWi}|d}x�t|�D]�\} } d| i|| <xtdD]l}t|j|g��d kr�||| }n:t|j|g��d kr�||d}n|dkr�td|��nq�||| |<q�Wq�W|S)a� Parse the config object to build a structure of connections parameters based on the number of entries that are in each key. 
The main parameter we base on to split off is host. :type config: dict (host, port, username, password, jvm_path) :param config: Dictionary with the information stored in the config file. :rtype: Dict :return: Dictionary with connection information split up in multiple if needed. �host�port�username�password�jvm_pathz"Missing %s information from configcSsg|]}|jd��qS)� )�strip)�.0�valuerCrCrD� <listcomp>qsz?TomcatJMXPlugin.get_connections_from_config.<locals>.<listcomp>�,r=r)rGrH)rGrH)rFrGrHrI)rGrH)�get� ValueError�split� enumerate�len)�cls�config�keys�data�key� key_value�values�connections�hosts�indexrErMrCrCrD�get_connections_from_configZs0 z+TomcatJMXPlugin.get_connections_from_configcCs�tj}d}|s$d}|jj|�tj}d|ks4d|krPd|j}|jj|�tj}|jd�}|s�y,tj�}|s�tj}d|j}|jj|�Wn(tj}d|j}|jj|�YnXy"|tjkr�tj �r�tj |�Wn"tj}d}|jj|�YnX|tjk�r�y�tj ��stj |�tjj �}|jd ��rt|jd ��rttjtjj�|jd �|jd �g�}|jtjjjj|�d|jd�t|jd��f}tjjj|�}tjjjj||�} | j�} || dfStk �r�d}|jj|�YnX|d|fS) a� returns a list of connections from the jpype library - a python interface to the Java Native Interface. Wheter there are 1 or many connections depends on the number of entries in the host, port and optionally username/password/jvm entries. :type config: dict :param config: Mapping of information under the application block for this plugin. :rtype: tuple (status, connection, error_message) :return: A tuple containing a numeric value corresponding to the agent_util status'. A MBeanServerConnection object. And, a string with an error message if any. 
NzNo JMX configuration foundrErFzJMissing value in the [%s] block of the agent config file (e.g host, port).rIzYUnable to find JVM, please specify 'jvm_path' in the [%s] block of the agent config file.z;Unable to access JMX metrics because JVM cannot be started.rGrHz*service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmizBUnable to access JMX metrics, JMX is not running or not installed.)� agent_util� SUPPORTED�log�info� MISCONFIGURED�textkeyrP�jpypeZgetDefaultJVMPathZisJVMStartedZstartJVMr�utilZHashMapZJArray�lang�String�putrr@ZremoteZJMXConnectorZCREDENTIALS�intZ JMXServiceURLZJMXConnectorFactory�connectZgetMBeanServerConnection� Exception� exception)rUrV�status�msgrIZj_hashZj_array�urlZjmx_urlZjmx_soc� connectionrCrCrDZ__get_connection�sl z TomcatJMXPlugin.__get_connectionc Cs�i}|j|�}i}g}xT|j�D]H}|j|�\}}} d|d|df} | r`|jd| | f�q ||| <q W|j�s�|jjd�x|D]} |jj| �q�W|Stj }d} x|D]}|jj |�q�Wx�|jj�D]�\}} |j | �}g}xZ|j�D]N\} }y|j|�|j| �Wq�tk �r:|jjd|| f�w�Yq�Xq�Wt|j��dk�rn|�rnd }tj}d |} n d} tj }| d| d}}|||| |d �||<q�W|S)areturns a json object who's textkeys correspond to a given metric available on the JVM. :type config: dict :param config: Mapping of information under the application block for this plugin. :return: JSON Object for all metrics z%s:%srErFz%s %sz#Unable to connect to any connection�z.Tomcat (JMX) plugin - %s bean not found at %s.r=Nz Unreachable %s at any connectionr�)�label�optionsro� error_message�unit)r_r[� _TomcatJMXPlugin__get_connection�appendrWrbrc�errorr`ra�warning�JMX_MAPPING�items�,_TomcatJMXPlugin__get_object_name_from_tuple�getObjectInstancermrnrT�UNSUPPORTED)rUrV�resultZconfigsr\�errors�entryrorrrpZconnection_keyr{rYrA�object_namervrurxrCrCrD�get_metadata�sX zTomcatJMXPlugin.get_metadatac Cs|j|�}|rBx:|j�D]$}d|d|df}||kr|}qWn|d}|j|�\}}} | rr|jjd| �dS|jj|�} | d| d}}|j| �} yR|j| �}|j |j �|�}|jj}d |kr�|j �S|j|�s�dS|j|�}|j �S|jjd | �dSdS)a�returns a value for the metric. :type textkey: string :param textkey: Canonical name for a metric. 
:type data: string :param data: Specific option to check for. :type config: dict :param config: Mapping of information under the application block for this plugin. :rtype: double :return: Value for a specific metric z%s:%srErFrzFailed to get a connection: %sN��ZCompositeDataSupportz(Tomcat (JMX) plugin - %s bean not found.)r_r[ryrbrcr}rPrr�ZgetAttributeZ getObjectName� __class__�__name__Z floatValueZcontainsKey)rUrerXrV�entriesr�Zpossible_matchrorrrprAZattribute_nameZcomposite_data_keyr�Zobject_instanceZattribute_valueZattribute_class_nameZcheck_resultrCrCrD�check;s8 zTomcatJMXPlugin.check)rrrNrr r )rrrNrrr )r rrNrrr )rrrNrrr )rrrNrr r )rrrNrrr )rrrNrrr )rrrNrrr )rrrNrNr)rrrNrNr)rrrNrNr)r rrNr!Nr)r"rr#Nr$Nr)r%rr&r%r'r r )r(rr&r(r'r r )r)rr&r)r'r r )r*rr&r*r'r r )r+rr&r+r'r r )r,rr&r,r'r r )r-rr&r-r'r r )r.rr&r.r'r r )r/rr&r/r'r r )r0rr&r0r'r r )r1rr&r1r'r r )r2rr3r2r4Nr)r5rr&r5r4Nr)r6rr3r6r4Nr)r7rr3r7r4Nr)r8rr3r8r4Nr)r9rr3r9r4Nr)r:rr3r:r4Nr)r;rr3r;r4Nr)r<rr3r<r4Nr)r�� __module__�__qualname__�__doc__rerur}�staticmethodr�classmethodr_ryr�r�rCrCrCrDr s�.kHr) �loggingr`rfrr� getLoggerr��logger�PluginrrCrCrCrD�<module>s __pycache__/process.cpython-36.pyc000064400000016454151700142040013064 0ustar003 ���i�D� @sTddlZddlZddlZyddlZWndZYnXddlZGdd�dej�ZdS)�Nc@s0eZdZdZdZedd��Zdd�Zdd�Zd S) � ProcessPlugin�process�ProcesscCs�tj}d}dtjkr tj}d}nrdtjkr6tj}d}n\dtjkrLtj}d}nFtdkrl|jjd�tj}d}n&tj j d�s�|jjd�tj}d}iSdtjkr�d d||d dd�d d||d dd�dd||ddd�dd||ddd�d�}n�dd||d d�d d||d dd�dd||ddd�dd||ddd�dd||ddd�dd||ddd�d d||d dd�dd||ddd�dd||ddd�dd||ddd�dd||ddd�dd||ddd�d�}|S)N�aix�darwin�vmwarez=Unable to import psutil library, no process metrics availablezDUnable to import psutil library, please install and rebuild metadataz/procz/proc not foundzEnable procfs.zNumber of processes - name� processesT)�label�options�status� error_message�unit� option_stringz'Number of processes - full command linezProcess is running�booleanz&Process is running - full command 
line)zprocess.named_countzprocess.named_count.fullzprocess.existszprocess.exists.fullzNumber of processes running)r r rrr z%Memory percentage of processes - name�percentz"CPU percentage of processes - namez+Process Thread Count - executable name only�threadsz2Memory percentage of processes - full command linez2MB of memory used by processes - full command line�MBz/CPU percentage of processes - full command linez/Process Thread Count - executable name and args)zprocess.running_countzprocess.named_countzprocess.named_memory_percentagezprocess.named_cpu_percentagezprocess.existszprocess.thread_countzprocess.named_count.fullz$process.named_memory_percentage.fullz process.named_memory_raw_mb.fullz!process.named_cpu_percentage.fullzprocess.exists.fullzprocess.thread_count.full)� agent_util� SUPPORTED�sys�platform�psutil�log�info�UNSUPPORTED�os�path�exists)�self�configr�msg�metadata�r"�$/usr/lib/fm-agent/plugins/process.py�get_metadatas� zProcessPlugin.get_metadatac&sldtjksdtjk�r�dtjkr&d}ndtjkr4d}tj|�\}}|jd�}|jd�r`|jd�}|dkrtt|�dSd }g}x6|dd�D]&} || kr�|d7}|j| j�d �q�W|d kr�|S|dkr�|r�dSd Sn�|d3k�r�d } d }d }x�|D]�} dtjk�rd| }ndtjk�r d| }tj|�\}}|j �jd�}t|�dk�rN�q�|dj�}t |d �}t |d�}t |d�}| |7} ||7}||7}�q�W|dk�r�|S|dk�r�t |�dS| SdSdtjk�rxtjd�}d}|�s�|jj d�dS|jd��rd|}nd|}|d|7}tj|�\}}|jd�}|jd ��rRt|�dS|jd��rxt|�dd k�rtdSd Stdk�r�|jjd�dSd�|jd��r�|jd�}d�tj�}g}t|�}|jd ��rF�dk�rFxh|D]`}y0tj||j���r|j|jddddgd ��Wn(tjk �r<|jjd!��w�YnX�q�Wn�|jd ��rΈdk�r�x�|D]f}y6tj|d"j|j����r�|j|jddddgd ��Wn(tjk �r�|jjd!��wbYnX�qbWn^x\|D]T}y$|j|jdddd#d$d%d&gd ��Wn(tjk �r$|jjd!��w�YnX�q�Wx6|D].}|d�sLd|d<�q2d"j|d�|d<�q2W|jjd'|�|dk�s�|d(k�r�ttj��S|d k�rg}|jjd)|�x8|D]0}|�dk �r�tj||���r�|j|��q�W|jj|�t|�S|dk�rvg}|jjd)|�x8|D]0}|�dk �r$tj||���r$|j|��q$W|jj|�|�rndSd S�n�|d4k�r�g}|jjd)|�x8|D]0}|�dk �r�tj||���r�|j|��q�W|jj|�|jjd+�fd,d-�|D��|�sd Sd } d }d }d }d.tjk�rJ|j|�}|d#} |d$}|d%}|d&}n^xP|D]H} | d#}| d$}| d%j}| d&}| |7} ||7}||7}||7}�qPWt |�d5}|dk�r�|S|dk�r�|S|d*k�r�|S| Sn�|d6k�rh|�r�td0d-�|D��} nd1S|j 
||�}!|!�s$|j!||| �dS|!d \}"}#| |#|"}$|j!||| �tj"�}%|%�s\d}%|$|%d2Sd S)7Nr�sunoszps axwwzps -eo pid' 'args� z.fullzprocess.running_count�rzprocess.named_countzprocess.exists�process.named_memory_percentage�process.named_cpu_percentage�named_memory_raw_mbzps -fp %s -o pcpu,pmem,rss��process.named_memory_raw_mbir�pgrep�z5Unable to find 'pgrep'! Unable to check for processesz%s -fz%sz %sz=Unable to import psutil library, no process metrics available�name�cmdline_str�pid�cmdline� cpu_times)�attrszUnable to get process.� �cpu_percent�memory_percent�memory_info�num_threadszAll running processes: %s �countzSearching processes for '%s'�process.thread_countzFound matching processes: %scsg|]}|��qSr"r")�.0�p)�searchr"r#� <listcomp>osz'ProcessPlugin.check.<locals>.<listcomp>r�!process.named_cpu_percentage.fullcSs$g|]}|jd�j|jd�j�qS)r3)�get�user�system)r<r=r"r"r#r?�sg�d)r(r)r*)r(r,r;i)r)r@)#rrr�execute_command�split�endswith�rstrip�len�append�strip�float�whichr�error� startswithrr�process_iter�str�rer>r/�as_dict� NoSuchProcess� exception�joinr2�debug�pids�findDarwinProcInfo�rss�sum�get_cache_results�cache_result� cpu_count)&r�textkey�datar�ps_cmd�retcode�outputr:rX�line�all_cpu�all_mem� all_raw_kbr1�ps�ret�fields�cpu�mem�raw_kbr-�cmd�out�process_objsr�proc�found_procs�all_raw_mem�all_thread_count�rv�mem_raw�thread_count�user_sum�last_result�delta�previous�time_used_result�number_of_coresr")r>r#�check�s� zProcessPlugin.checkcCs�g}x|D]}|jt|d��q Wtjdjdj|���\}}|jd�}|dd�}td�td�td�t|�d�}xh|D]`} | j�} t| �d krx|d t| d�7<|dt| d �7<|dt| d�7<qxW|d}t|�d|d<|S)z� On OSX, psutil will not report process information on processes belonging to other users, unless the requesting process is privileged. 
https://github.com/giampaolo/psutil/issues/883 r1zps uM -p {}�,r&r'Nr)r6r7r8r9�r6r+r7�r8�i) rJrQrrE�formatrVrFrLrI)rrrrX�fp�rcrc�lines� procLinesru�lr�mr"r"r#rY�s& z ProcessPlugin.findDarwinProcInfoN) �__name__� __module__�__qualname__r_r �classmethodr$r~rYr"r"r"r#rs#r)rrrrrR�Pluginrr"r"r"r#�<module>s __init__.py000064400000000000151700142040006635 0ustar00apache.py000064400000050113151700142040006331 0ustar00import agent_util from plugins.process import ProcessPlugin try: import ssl except: ssl = None try: # Python 2.x from httplib import HTTPConnection, HTTPSConnection except: from http.client import HTTPConnection, HTTPSConnection from library.log_matcher import LogMatcher import traceback # ON FREEBSD/CENTOS, THEY MAY NEED TO ADD THIS TO THEIR HTTPD.CONF/APACHE2.CONF: # LoadModule status_module libexec/apache22/mod_status.so # <IfModule status_module> # ExtendedStatus On # <Location /server-status> # SetHandler server-status # Order deny,allow # Allow from all # </Location> # </IfModule> class ApachePlugin(agent_util.Plugin): textkey = "apache" label = "Apache Webserver" DEFAULTS = { "server_status_protocol": "http", "server_status_host": "localhost", "server_status_url": "server-status", "apache_log_files": [ "/var/log/apache2/access.log", "/var/log/httpd/access.log", "/var/log/httpd-access.log", ], } LOG_COUNT_EXPRESSIONS = { "apache.4xx": r"4\d{2}", "apache.5xx": r"5\d{2}", "apache.2xx": r"2\d{2}", } @classmethod def get_data(self, textkey, ip, config): server_status_path = "" server_status_url = config.get("server_status_url") server_status_protocol = config.get("server_status_protocol") server_status_port = config.get("server_status_port", None) if not server_status_url.startswith("/"): server_status_path += "/" server_status_path += server_status_url + "?auto" if server_status_protocol == "https" and ssl is not None: if server_status_port is None: conn = HTTPSConnection(ip, context=ssl._create_unverified_context()) else: conn = HTTPSConnection( ip, 
int(server_status_port), context=ssl._create_unverified_context(), ) else: if server_status_port: conn = HTTPConnection(ip, server_status_port) else: conn = HTTPConnection(ip) try: conn.request("GET", server_status_path) r = conn.getresponse() output = r.read().decode() conn.close() except: self.log.info( """ Unable to access the Apache status page at %s%s. Please ensure Apache is running and the server status url is correctly specified. """ % (ip, server_status_path) ) self.log.info("error: %s" % traceback.format_exc()) return None data = dict() for line in output.splitlines(): if ":" not in line: continue k, v = line.split(": ", 1) data.update({k: v}) def get_param_value(data, param, output_type): try: return output_type(data[param]) except KeyError: return None if textkey.endswith("uptime"): return get_param_value(data, "Uptime", int) elif textkey.endswith("total_accesses"): return get_param_value(data, "Total Accesses", int) elif textkey.endswith("total_traffic"): val = get_param_value(data, "Total kBytes", int) # Convert to MB for backwards compatibility if val: return val / 1000.0 else: return None elif textkey.endswith("cpu_load"): return get_param_value(data, "CPULoad", float) elif textkey.endswith("connections"): return get_param_value(data, "ReqPerSec", float) elif textkey.endswith("transfer_rate"): val = get_param_value(data, "BytesPerSec", float) # Convert to MB for backwards compatibility return val / (1000.0**2) elif textkey.endswith("avg_request_size"): val = get_param_value(data, "BytesPerReq", float) # Convert to MB for backwards compatibility return val / (1000.0**2) elif textkey.endswith("workers_used_count"): return get_param_value(data, "BusyWorkers", int) elif textkey.endswith("workers_idle_count"): return get_param_value(data, "IdleWorkers", int) elif textkey in ("apache.workers_used", "apache.workers_idle"): busy = get_param_value(data, "BusyWorkers", int) idle = get_param_value(data, "IdleWorkers", int) if busy is None or idle is None: 
return None total = busy + idle if textkey.endswith("workers_used"): return float(100.0 * busy / total) elif textkey.endswith("workers_idle"): return float(100.0 * idle / total) @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None self.log.info("Looking for apache2ctl to confirm apache is installed") # look for either overrides on how to access the apache healthpoint or one of the apachectl bins if ( not agent_util.which("apache2ctl") and not agent_util.which("apachectl") and not config.get("from_docker") and not agent_util.which("httpd") ): self.log.info("Couldn't find apachectl or apache2ctl") status = agent_util.UNSUPPORTED msg = "Apache wasn't detected (apachectl or apache2ctl)" return {} # update default config with anything provided in the config file if config: new_config = self.DEFAULTS.copy() new_config.update(config) config = new_config # Look for Apache server-status endpoint server_status_path = "" server_status_protocol = config.get( "server_status_protocol", self.DEFAULTS["server_status_protocol"] ) server_status_url = config.get( "server_status_url", self.DEFAULTS["server_status_url"] ) server_status_port = config.get("server_status_port", None) if not server_status_url.startswith("/"): server_status_path += "/" server_status_path += server_status_url + "?auto" not_found_error = ( """ Unable to access the Apache status page at %s. Please ensure the status page module is enabled, Apache is running, and, optionally, the server status url is correctly specified. See docs.fortimonitor.forticloud.com/ for more information. 
""" % server_status_path ) host_list = [] # support optional comma delimitted addresses ip_list = config.get("server_status_host", "localhost") ip_list = ip_list.split(",") # loop over each IP from config and check to see if the status endpoint is reachable for ip in ip_list: ip_working = True try: if server_status_protocol == "https" and ssl is not None: if server_status_port is None: conn = HTTPSConnection( ip, context=ssl._create_unverified_context() ) else: conn = HTTPSConnection( ip, int(server_status_port), context=ssl._create_unverified_context(), ) else: if server_status_port: conn = HTTPConnection(ip, server_status_port) else: conn = HTTPConnection(ip) conn.request("GET", server_status_path) r = conn.getresponse() conn.close() except Exception: import sys _, err_msg, _ = sys.exc_info() ip_working = False self.log.info(not_found_error) self.log.info("error: %s" % err_msg) msg = not_found_error continue if r.status != 200: self.log.info(not_found_error) msg = not_found_error ip_working = False if ip_working: host_list.append(ip) output = r.read() if config.get("debug", False): self.log.info( "#####################################################" ) self.log.info("Apache server-status output:") self.log.info(output) self.log.info( "#####################################################" ) if not host_list: status = agent_util.MISCONFIGURED msg = not_found_error return {} # Checking log files access if not config.get("apache_log_files"): log_files = self.DEFAULTS.get("apache_log_files") else: log_files = config.get("apache_log_files") try: if type(log_files) in (str, unicode): log_files = log_files.split(",") except NameError: if type(log_files) in (str, bytes): log_files = log_files.split(",") can_access = False log_file_msg = "" log_file_status = status for log_file in log_files: try: opened = open(log_file, "r") opened.close() # Can access at least one file. 
Support log access can_access = True except Exception: import sys _, error, _ = sys.exc_info() message = ( "Error opening the file %s. Ensure the fm-agent user has access to read this file" % log_file ) if "Permission denied" in str(error): self.log.error(error) self.log.error(message) if log_file not in self.DEFAULTS.get("apache_log_files", []): self.log.error(error) self.log.error(message) log_file_msg = message log_file_status = agent_util.MISCONFIGURED if can_access: log_file_status = agent_util.SUPPORTED log_file_msg = "" metadata = { "apache.workers_used": { "label": "Workers - percent serving requests", "options": host_list, "status": status, "error_message": msg, "unit": "%", }, "apache.workers_idle": { "label": "Workers - percent idle", "options": host_list, "status": status, "error_message": msg, "unit": "%", }, "apache.workers_used_count": { "label": "Workers - count serving requests", "options": host_list, "status": status, "error_message": msg, "unit": "workers", }, "apache.workers_idle_count": { "label": "Workers - count idle", "options": host_list, "status": status, "error_message": msg, "unit": "workers", }, "apache.uptime": { "label": "Server uptime", "options": host_list, "status": status, "error_message": msg, "unit": "seconds", }, "apache.total_accesses": { "label": "Request count", "options": host_list, "status": status, "error_message": msg, "unit": "requests", }, "apache.total_traffic": { "label": "Total content served", "options": host_list, "status": status, "error_message": msg, "unit": "MB", }, "apache.cpu_load": { "label": "Percentage of CPU used by all workers", "options": host_list, "status": status, "error_message": msg, "unit": "%", }, "apache.connections": { "label": "Requests per second", "options": host_list, "status": status, "error_message": msg, "unit": "requests", }, "apache.transfer_rate": { "label": "Transfer rate", "options": host_list, "status": status, "error_message": msg, "unit": "MB/s", }, "apache.avg_request_size": { 
"label": "Request size average", "options": host_list, "status": status, "error_message": msg, "unit": "MB", }, "apache.2xx": { "label": "Rate of 2xx's events", "options": None, "status": log_file_status, "error_message": log_file_msg, "unit": "entries/s", }, "apache.4xx": { "label": "Rate of 4xx's events", "options": None, "status": log_file_status, "error_message": log_file_msg, "unit": "entries/s", }, "apache.5xx": { "label": "Rate of 5xx's events", "options": None, "status": log_file_status, "error_message": log_file_msg, "unit": "entries/s", }, "apache.is_running": { "label": "Apache is running", "options": None, "status": status, "error_message": msg, }, } return metadata @classmethod def get_metadata_docker(self, container, config): if "server_status_host" not in config: try: ip = agent_util.get_container_ip(container) config["server_status_host"] = ip except Exception: import sys _, e, _ = sys.exc_info() self.log.exception(e) config["from_docker"] = True return self.get_metadata(config) def get_apache_process_name(self): if agent_util.which("apache2ctl"): return "apache2" if agent_util.which("httpd"): return "httpd" if agent_util.which("httpd22"): return "httpd22" return None def check(self, textkey, ip, config): # update default config with anything provided in the config file new_config = self.DEFAULTS.copy() new_config.update(config) config = new_config # add backwards compatibility for older entries where IP was not an option. Pull from config instead. 
if not ip: ip = config["server_status_host"].split(",")[0] if textkey == "apache.is_running" and not config.get("from_docker"): apache_process = ProcessPlugin(None) apache_process_name = self.get_apache_process_name() if apache_process_name is not None: apache_is_running = apache_process.check( "process.exists", apache_process_name, {} ) return apache_is_running return None if textkey in ("apache.2xx", "apache.4xx", "apache.5xx"): file_inodes = {} total_metrics = 0 timescale = 1 column = 8 expression = self.LOG_COUNT_EXPRESSIONS.get(textkey) if not config.get("apache_log_files"): log_files = config["apache_log_files"] else: log_files = config.get("apache_log_files") try: if type(log_files) in (str, unicode): log_files = log_files.split(",") except NameError: if type(log_files) in (str, bytes): log_files = log_files.split(",") for target in log_files: try: file_inodes[target] = LogMatcher.get_file_inode(target) except OSError: import sys _, error, _ = sys.exc_info() if "Permission denied" in str(error): self.log.error( "Error opening the file %s. Ensure the fm-agent user has access to read this file" % target ) self.log.error(str(error)) if target not in self.DEFAULTS.get("apache_log_files", []): self.log.error(str(error)) self.log.error( "Error opening the file %s. Ensure the fm-agent user has access to read this file" % target ) continue log_data = self.get_cache_results( textkey, "%s/%s" % (self.schedule.id, target) ) if log_data: log_data = log_data[0][-1] else: log_data = dict() last_line_number = log_data.get("last_known_line") stored_inode = log_data.get("inode") results = log_data.get("results", []) try: total_lines, current_lines = LogMatcher.get_file_lines( last_line_number, target, file_inodes[target], stored_inode ) except IOError: import sys _, error, _ = sys.exc_info() self.log.error( "Unable to read the file %s. 
Ensure the fm-agent user has access to read this file" % target ) continue self.log.info( "Stored line %s Current line %s looking at %s lines" % (str(last_line_number), str(total_lines), str(len(current_lines))) ) log_matcher = LogMatcher(stored_inode) results = log_matcher.match_in_column(current_lines, expression, column) metric, results = log_matcher.calculate_metric(results, timescale) total_metrics += metric and metric or 0 self.log.info( 'Found %s instances of "%s" in %s' % (str(metric or 0), expression, target) ) previous_result = self.get_cache_results( textkey, "%s/%s" % (self.schedule.id, target) ) cache_data = dict( inode=file_inodes[target], last_known_line=total_lines, results=results, ) self.cache_result( textkey, "%s/%s" % (self.schedule.id, target), cache_data, replace=True, ) if not previous_result: return None else: delta, prev_data = previous_result[0] try: curr_count = cache_data.get("results")[0][-1] result = curr_count / float(delta) except IndexError: result = None return result else: return ApachePlugin.get_data(textkey, ip, config) def check_docker(self, container, textkey, ip, config): try: ip = agent_util.get_container_ip(container) except: return None return self.check(textkey, ip, config) apache_kafka.py000064400000033243151700142040007473 0ustar00import agent_util import logging import traceback logger = logging.getLogger(__name__) ### Mapping of JMX URI entries to their agent readable counterparts JMX_MAPPING = { "broker.bips.oneminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec", "OneMinuteRate", None, ), "broker.bips.fiveminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec", "FiveMinuteRate", None, ), "broker.bips.fifteenminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec", "FifteenMinuteRate", None, ), "broker.bips.meanrate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec", "MeanRate", None, ), "broker.bops.oneminuterate": ( 
"kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec", "OneMinuteRate", None, ), "broker.bops.fiveminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec", "FiveMinuteRate", None, ), "broker.bops.fifteenminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec", "FifteenMinuteRate", None, ), "broker.bops.meanrate": ( "kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec", "MeanRate", None, ), "broker.mips.oneminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec", "OneMinuteRate", None, ), "broker.mips.fiveminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec", "FiveMinuteRate", None, ), "broker.mips.fifteenminuterate": ( "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec", "FifteenMinuteRate", None, ), "broker.mips.meanrate": ( "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec", "MeanRate", None, ), "underreplicatedpartitions": ( "kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions", "Value", None, ), "fetch.queue-size": ("kafka.server:type=Fetch", "queue-size", None), "memory.heap.committed": ("java.lang:type=Memory", "HeapMemoryUsage", "committed"), "memory.heap.used": ("java.lang:type=Memory", "HeapMemoryUsage", "used"), "memory.heap.max": ("java.lang:type=Memory", "HeapMemoryUsage", "max"), } #### def discover_beans(connection): allowed_beans = [ "kafka.server:type=BrokerTopicMetrics", "kafka.server:type=ReplicaManager", "kafka.log:type=LogFlushStats", "java.lang:type=Memory", "kafka.server:type=Fetch", ] ignored_topics = ["__consumer_offsets", "ReplicaFetcherThread-0-2"] discovered_beans = [] discovered_topics = [] avail_beans = connection.queryMBeans(None, None) for bean in avail_beans: name = bean.objectName.toString() if any(b in name for b in allowed_beans): discovered_beans.append(bean.toString()) if "topic" in name: topic = name.split(",")[2].split("=")[1] if topic not in ignored_topics: discovered_topics.append(topic) return 
discovered_beans, list(set(discovered_topics)) class ApacheKafkaPlugin(agent_util.Plugin): textkey = "apache_kafka_jmx" label = "Apache Kafka (JMX)" connection = None @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # Check for jmx configuration block if not config: self.log.info("No JMX configuration found") return {} # make sure jpype1 is installed first try: import jpype from jpype import java, javax except: msg = "Unable to access JMX metrics due to missing jpype library." self.log.info(msg) status = agent_util.MISCONFIGURED # Check for config setting sin jmx configuration block for key in ["port", "host"]: if key not in config: msg = ( "Missing value for %s in the [jmx] block of the agent config file." % key ) self.log.info(msg) status = agent_util.MISCONFIGURED # we'll need to get the default JVM path if not specified. If that doesn't work, throw an error if "jvm_path" not in config: try: jvm_path = jpype.getDefaultJVMPath() if not jvm_path: msg = "Unable to find JVM, please specify 'jvm_path' in the [jmx] block of the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED except: msg = "Unable to find JVM, please specify 'jvm_path' in the [jmx] block of the agent config file." self.log.info(msg) self.log.error(traceback.format_exc()) status = agent_util.MISCONFIGURED elif "jvm_path" in config: jvm_path = config["jvm_path"] try: if status == agent_util.SUPPORTED and not jpype.isJVMStarted(): jpype.startJVM(jvm_path) except: msg = "Unable to access JMX metrics because JVM cannot be started." 
self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: try: if not jpype.isJVMStarted(): jpype.startJVM(config["jvm_path"]) jhash = java.util.HashMap() if config.get("username") and config.get("password"): jarray = jpype.JArray(java.lang.String)( [config["username"], config["password"]] ) jhash.put(javax.management.remote.JMXConnector.CREDENTIALS, jarray) url = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi" % ( config["host"], int(config["port"]), ) jmxurl = javax.management.remote.JMXServiceURL(url) # Gather the topics we can monitor jmxsoc = javax.management.remote.JMXConnectorFactory.connect( jmxurl, jhash ) self.connection = jmxsoc.getMBeanServerConnection() except: msg = ( "Unable to access JMX metrics, JMX is not running or not installed." ) self.log.info(msg) status = agent_util.MISCONFIGURED return {} beans, topics = discover_beans(self.connection) metadata = { "broker.bips.oneminuterate": { "label": "Topic Byes In/sec - 1 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bips.fiveminuterate": { "label": "Topic Byes In/sec - 5 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bips.fifteenminuterate": { "label": "Topic Byes In/sec - 15 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bips.meanrate": { "label": "Topic Byes In/sec - Avg", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bops.oneminuterate": { "label": "Topic Byes Out/sec - 1 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bops.fiveminuterate": { "label": "Topic Byes Out/sec - 5 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bops.fifteenminuterate": { "label": "Topic Byes Out/sec - 15 min", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.bops.meanrate": { "label": "Topic 
Byes Out/sec - Avg", "options": topics, "status": status, "error_message": msg, "unit": "bytes", }, "broker.mips.oneminuterate": { "label": "Topic Messages In/sec - 1 min", "options": topics, "status": status, "error_message": msg, "unit": "messages", }, "broker.mips.fiveminuterate": { "label": "Topic Messages In/sec - 5 min", "options": topics, "status": status, "error_message": msg, "unit": "messages", }, "broker.mips.fifteenminuterate": { "label": "Topic Messages In/sec - 15 min", "options": topics, "status": status, "error_message": msg, "unit": "messages", }, "broker.mips.meanrate": { "label": "Topic Messages In/sec - Avg", "options": topics, "status": status, "error_message": msg, "unit": "messages", }, "underreplicatedpartitions": { "label": "Replica Manager Unreplicated Partitions", "options": None, "status": status, "error_message": msg, "unit": "partitions", }, "fetch.queue-size": { "label": "Queued messages", "options": None, "status": status, "error_message": msg, }, "memory.heap.committed": { "label": "Heap Memory - Committed", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "memory.heap.used": { "label": "Heap Memory - Used", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "memory.heap.max": { "label": "Heap Memory - Max", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, } return metadata def check(self, textkey, data, config): try: import jpype from jpype import java, javax except: self.log.error("Unable to import jpype! Is it installed?") return None try: # we'll need to get the default JVM path if not specified. If that doesn't work, throw an error if "jvm_path" not in config: try: jvm_path = jpype.getDefaultJVMPath() if not jvm_path: msg = "Unable to find JVM, please specify 'jvm_path' in the [jmx] block of the agent config file." self.log.info(msg) except: msg = "Unable to find JVM, please specify 'jvm_path' in the [jmx] block of the agent config file." 
self.log.info(msg) self.log.error(traceback.format_exc()) elif "jvm_path" in config: jvm_path = config["jvm_path"] if not jpype.isJVMStarted(): jpype.startJVM(jvm_path) jhash = java.util.HashMap() if config.get("username") and config.get("password"): jarray = jpype.JArray(java.lang.String)( [config["username"], config["password"]] ) jhash.put(javax.management.remote.JMXConnector.CREDENTIALS, jarray) url = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi" % ( config["host"], int(config["port"]), ) jmxurl = javax.management.remote.JMXServiceURL(url) jmxsoc = javax.management.remote.JMXConnectorFactory.connect(jmxurl, jhash) connection = jmxsoc.getMBeanServerConnection() parts = JMX_MAPPING.get(textkey, None) if parts is None or not parts: self.log.error( "Unable to find Kafka metric %s in known metrics!" % textkey ) return None # start building the JMX object obj = parts[0] if data: obj += ",topic=%s" % data # get the actual metric attribute = parts[1] # if the metric is buried deeper in a dict, grab it val = parts[2] res = connection.getAttribute(javax.management.ObjectName(obj), attribute) log_msg = "Checking Kafka metric %s" % attribute if val is not None: return res.contents.get(val).floatValue() else: log_msg += " with key %s" % val return res.floatValue() self.log.debug(log_msg) except: self.log.critical( "Error checking Kafka metric %s - %s \n%s" % (textkey, data, traceback.format_exc()) ) return None apache_zookeeper.py000064400000014531151700142040010420 0ustar00import agent_util import sys import socket import traceback def netcat(hostname, port, content): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((hostname, port)) s.sendall(content) s.shutdown(socket.SHUT_WR) while 1: data = s.recv(1024) if data == "": break response = data break s.close() return response def nc_to_dict(data): parsed = {} a = data.split("\n") for l in a: try: b = l.split("\t") parsed[b[0]] = b[1] except: continue return parsed class 
ApacheZookeeperPlugin(agent_util.Plugin): textkey = "apache_zookeeper" label = "Apache Zookeeper" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if status == agent_util.SUPPORTED and ( not "host" in config or not "port" in config ): msg = "The host and port entries were not found in the [apache_zookeeper] block in the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED return {} if status == agent_util.SUPPORTED: response = None try: response = netcat(config["host"], int(config["port"]), "envi") except: self.log.exception("Error running Zookeeper hello") self.log.debug(traceback.format_exc()) status = agent_util.MISCONFIGURED msg = "" return {} if response is None or response == "": self.log.exception("Bad response running Zookeeper hello") return {} data = { # basic "avg_latency": { "label": "Average request latency", "options": None, "status": status, "error_message": msg, "unit": "ms", }, "max_latency": { "label": "Maximum request latency", "options": None, "status": status, "error_message": msg, "unit": "ms", }, "packets_received": { "label": "Packets received", "options": None, "status": status, "error_message": msg, "unit": "packets", }, "packets_sent": { "label": "Packets sent", "options": None, "status": status, "error_message": msg, "unit": "packets", }, "packets_received_per_sec": { "label": "Packets received/sec", "options": None, "status": status, "error_message": msg, "unit": "packets/sec", }, "packets_sent_per_sec": { "label": "Packets sent/sec", "options": None, "status": status, "error_message": msg, "unit": "packets/sec", }, "outstanding_requests": { "label": "Outstanding Requests", "options": None, "status": status, "error_message": msg, "unit": "", }, "server_state": { "label": "Server Mode", "options": None, "status": status, "error_message": msg, "unit": "", }, "znode_count": { "label": "Node count", "options": None, "status": status, "error_message": msg, "unit": "", }, "watch_count": 
{ "label": "Watcher Count", "options": None, "status": status, "error_message": msg, "unit": "", }, "approximate_data_size": { "label": "Approximate data size", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "open_file_descriptor_count": { "label": "Open file descriptors", "options": None, "status": status, "error_message": msg, "unit": "files", }, "fsync_threshold_exceed_count": { "label": "Slow fsync count", "options": None, "status": status, "error_message": msg, "unit": "", }, "ruok": { "label": "Node error state", "options": None, "status": status, "error_message": msg, "unit": "", }, } return data def check(self, textkey, data, config): if textkey == "ruok": output = netcat(config["host"], int(config["port"]), "ruok") if output == "imok": return 0 else: return 1 else: output = netcat(config["host"], int(config["port"]), "mntr") data = nc_to_dict(output) key = "zk_" + str(textkey.replace("_per_sec", "")) value = data.get(key, False) if textkey == "server_state": if value == "follower": return 0 if value == "leader": return 1 if ( textkey == "packets_received_per_sec" or textkey == "packets_sent_per_sec" ): self.log.debug(data) if value > 0: cached = self.get_cache_results(textkey, None) self.cache_result(textkey, None, value, replace=True) self.log.debug("####\nGot cached result!\n%s" % cached) print("####\nGot cached result!\n%s" % cached) delta, c = cached[0] rate = (float(value) / float(c)) / float(delta) return rate else: return 0 if value: return float(value) else: return None bandwidth.py000064400000055134151700142040007064 0ustar00import agent_util import sys import os from datetime import datetime from agent_util import float import fnmatch class BandwidthPlugin(agent_util.Plugin): textkey = "bandwidth" label = "Bandwidth" @classmethod def get_metadata(self, config): def add_to_interfaces(iface): if any(fnmatch.fnmatch(iface, f) for f in filter_interfaces): return if type(interfaces) == set: interfaces.add(iface) else: 
interfaces.append(iface) status = agent_util.SUPPORTED msg = None interfaces = set() filter_interfaces = [] filter_interfaces_config = config.get("filter_interfaces") if filter_interfaces_config: if filter_interfaces_config[0] == "[": filter_interfaces_config = filter_interfaces_config.strip("[]") for f in filter_interfaces_config.split(","): f = f.strip() if f: if f[0] == '"': f = f.strip('"') elif f[0] == "'": f = f.strip("'") if f: filter_interfaces.append(f) if "freebsd" in sys.platform or "darwin" in sys.platform: netstat_binary = agent_util.which("netstat") if not netstat_binary: self.log.info("netstat not found") status = agent_util.UNSUPPORTED msg = "Please install netstat." return {} if status is agent_util.SUPPORTED: ret, output = agent_util.execute_command("%s -ib" % netstat_binary) self.log.debug("BANDWIDTH INFO") self.log.debug(output) if config.get("debug", False): self.log.debug( "#####################################################" ) self.log.debug("Bandwidth command 'netstat -ib' output:") self.log.debug(output) self.log.debug( "#####################################################" ) output = output.splitlines()[1:] for line in output: if line == "" or "lo" in line: continue stuff = line.strip().split() iface = stuff[0] add_to_interfaces(iface) elif "aix" in sys.platform: if not agent_util.which("netstat"): self.log.info("netstat not found") status = agent_util.UNSUPPORTED msg = "Please install netstat." 
return {} # Get the list of network devices interfaces = [] ret, output = agent_util.execute_command( "netstat -v | grep 'ETHERNET STATISTICS'" ) output = output.strip().split("\n") for line in output: fields = line.split() try: add_to_interfaces(fields[2].strip("(").strip(")")) except: pass metadata = {} metadata["bandwidth.kbytes.in"] = { "label": "Kilobytes IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.kbytes.out"] = { "label": "Kilobytes OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.packets.in"] = { "label": "Packets IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } metadata["bandwidth.packets.out"] = { "label": "Packets OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } return metadata elif "sunos" in sys.platform: if not agent_util.which("netstat"): self.log.info("netstat not found") status = agent_util.UNSUPPORTED msg = "Please install netstat." 
return {} # Get the list of network devices interfaces = [] ret, output = agent_util.execute_command("netstat -i") output = output.strip().split("\n") for line in output: fields = line.split() if not fields: continue if fields[0] in ("lo", "inet", "ether", "Name"): continue try: add_to_interfaces(fields[0]) except: pass metadata = {} metadata["bandwidth.kbytes.in"] = { "label": "Kilobytes IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.kbytes.out"] = { "label": "Kilobytes OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.packets.in"] = { "label": "Packets IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } metadata["bandwidth.packets.out"] = { "label": "Packets OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } return metadata elif "vmware" in sys.platform: if not agent_util.which("esxcli"): self.log.info("esxcli not found") status = agent_util.UNSUPPORTED msg = "Please confirm esxcli is installed." 
return {} interfaces = [] ret, out = agent_util.execute_command("esxcli network nic list") iface_table = out.split("\n") for iface in iface_table: # skip the headers, dividers and any empty items if iface.startswith("Name") or iface.startswith("--") or iface == "": continue add_to_interfaces(iface.split()[0]) metadata = {} metadata["bandwidth.kbytes.in"] = { "label": "Kilobytes IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.kbytes.out"] = { "label": "Kilobytes OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.packets.in"] = { "label": "Packets IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } metadata["bandwidth.packets.out"] = { "label": "Packets OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } return metadata elif "hp-ux" in sys.platform: ret, out = agent_util.execute_command("nwmgr -g") iface_table = out.splitlines() interfaces = [] for line in iface_table: if not line.lower().startswith("lan"): continue iface = line.split() if iface[1] != "UP": continue add_to_interfaces(iface[0]) metadata = {} metadata["bandwidth.kbytes.in"] = { "label": "Kilobytes IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.kbytes.out"] = { "label": "Kilobytes OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.packets.in"] = { "label": "Packets IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } metadata["bandwidth.packets.out"] = { "label": "Packets OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } return metadata else: # Default Linux options if not os.path.exists("/proc/net/dev"): self.log.info("/proc/net/dev not found") status 
= agent_util.UNSUPPORTED msg = "/proc/net/dev not found" return {} if status is agent_util.SUPPORTED: # get the interfaces output = open("/proc/net/dev", "r").read() output = output.splitlines() if config.get("debug", False): self.log.debug( "#####################################################" ) self.log.debug("Content of file '/proc/net/dev':") self.log.debug(str(output)) self.log.debug( "#####################################################" ) for line in output[2:]: stuff = line.strip().split() iface, bytes_read = stuff[0].split(":") add_to_interfaces(iface) interfaces = list(interfaces) interfaces.sort() if status is agent_util.SUPPORTED and not interfaces: status = agent_util.MISCONFIGURED msg = "No network interfaces found." metadata = {} metadata["bandwidth.kbytes.in"] = { "label": "Kilobytes IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.kbytes.out"] = { "label": "Kilobytes OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.packets.in"] = { "label": "Packets IN per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } metadata["bandwidth.packets.out"] = { "label": "Packets OUT per second", "options": interfaces, "status": status, "error_message": msg, "unit": "packets", } if "freebsd" not in sys.platform and "darwin" not in sys.platform: metadata["bandwidth.monthly.in"] = { "label": "Kilobytes IN for the month", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } metadata["bandwidth.monthly.out"] = { "label": "Kilobytes OUT for the month", "options": interfaces, "status": status, "error_message": msg, "unit": "kB", } return metadata def check(self, textkey, interface, config): interface_found = False if "freebsd" in sys.platform: ret, output = agent_util.execute_command("netstat -ib") lines = output.splitlines()[1:] for line in lines: if "lo" in line or line 
== "": continue line = line.split() ( si, mtu, network, addr, packets_read, ierr, idrop, bytes_read, packets_written, oerr, bytes_written, coll, ) = line packets_read = int(packets_read) packets_written = int(packets_written) kbytes_read = int(bytes_read) / 1024 kbytes_written = int(bytes_written) / 1024 if si == interface and network.startswith("<Link"): interface_found = True break elif "darwin" in sys.platform: netstat = agent_util.which("netstat") ret, output = agent_util.execute_command("%s -ibn" % netstat) lines = output.splitlines()[1:] for line in lines: if "lo" in line or line == "": continue line = line.split() si = line[0] network = line[2] packets_read = int(line[-7]) bytes_read = int(line[-5]) packets_written = int(line[-4]) bytes_written = int(line[-2]) kbytes_read = bytes_read / 1024 kbytes_written = bytes_written / 1024 if si == interface and network.startswith("<Link"): interface_found = True break elif "aix" in sys.platform: ret, output = agent_util.execute_command( 'netstat -v %s | grep "^Bytes:"' % interface ) fields = output.split() kbytes_written = int(fields[1]) / 1024 kbytes_read = int(fields[3]) / 1024 ret, output = agent_util.execute_command( 'netstat -v %s | grep "^Packets:"' % interface ) fields = output.split() packets_written = int(fields[1]) / 1024 packets_read = int(fields[3]) / 1024 interface_found = True elif "sunos" in sys.platform: ret, output = agent_util.execute_command("netstat -i") for line in output.strip().split("\n"): fields = line.strip().split() if not fields: continue if fields[0] == interface: interface_found = True packets_read = int(fields[4]) packets_written = int(fields[6]) kbytes_read = 0 kbytes_written = 0 ret, output = agent_util.execute_command( "kstat -n %s 1 2 | egrep 'bytes64' | uniq" % interface ) for line in output.split("\n"): if not line: continue if "obytes" in line: fields = line.split() kbytes_read = int(fields[1]) / 1024 elif "rbytes" in line: fields = line.split() kbytes_written = int(fields[1]) / 
1024 elif "vmware" in sys.platform: ret, out = agent_util.execute_command( "esxcli network nic stats get -n %s" % interface ) if ret == 0: interface_found = True iface_stats = {} fields = out.split("\n") for i in fields: trans = i.strip().split(":") if len(trans) != 2: continue iface_stats[trans[0]] = trans[1].strip() kbytes_written = int(iface_stats["Bytes sent"]) / 1024 kbytes_read = int(iface_stats["Bytes received"]) / 1024 packets_written = int(iface_stats["Packets sent"]) packets_read = int(iface_stats["Packets received"]) elif "hp-ux" in sys.platform: interface_found = True # NOTE, we need to first divide by 8 to get bytes from octets # then divide by 1024 to get KB ret, out = agent_util.execute_command("nwmgr --st -c %s" % interface) nwmgr = out.splitlines() iface_stats = {} for line in nwmgr: if "=" not in line: continue l = line.split("=") iface_stats[l[0].strip()] = l[1].strip() self.log.debug(iface_stats) kbytes_read = (float(iface_stats["Inbound Octets"]) / 8) / 1024 kbytes_written = (float(iface_stats["Outbound Octets"]) / 8) / 1024 packets_read = total_pkts = ( float(iface_stats["Inbound Unicast Packets"]) + float(iface_stats["Inbound Multicast Packets"]) + float(iface_stats["Inbound Broadcast Packets"]) ) packets_written = total_pkts = ( float(iface_stats["Outbound Unicast Packets"]) + float(iface_stats["Outbound Multicast Packets"]) + float(iface_stats["Outbound Broadcast Packets"]) ) else: output = open("/proc/net/dev", "r").read() self.log.debug("/proc/net/dev output: %s" % str(output)) output = output.splitlines() for line in output[2:]: stuff = line.strip().split() if not stuff[0].endswith(":"): stuff[0], new_insert = stuff[0].split(":") stuff.insert(1, new_insert) else: stuff[0] = stuff[0].rstrip(":") iface = stuff[0] bytes_read = int(stuff[1]) bytes_written = int(stuff[9]) kbytes_read = int(bytes_read) / 1024 kbytes_written = int(stuff[9]) / 1024 packets_read = int(stuff[2]) packets_written = int(stuff[10]) if interface == iface: 
interface_found = True break # Special handling for monthly bandwidth - each time through we bank the # difference in the current reading vs. the previous, taking into account # cases where we cross a month boundary and when the byte counters wrap if textkey.startswith("bandwidth.monthly"): if textkey == "bandwidth.monthly.in": current_bytes = bytes_read else: current_bytes = bytes_written # First get the cached values c = self.get_cache_results(textkey + ":current_bytes", interface) previous_bytes = c and c[0][1] or current_bytes c = self.get_cache_results(textkey + ":banked_bytes", interface) banked_bytes = c and c[0][1] or 0 c = self.get_cache_results(textkey + ":current_month", interface) current_month = c and c[0][1] or datetime.now().month c = self.get_cache_results(textkey + ":current_year", interface) current_year = c and c[0][1] or datetime.now().year now = datetime.now() if now.year != current_year or now.month != current_month: # If we"ve crossed a month boundary, zero out bank and reset counters banked_bytes = 0 current_year = now.year current_month = now.month previous_bytes = current_bytes elif current_bytes < previous_bytes: # The OS counters wrapped, need to handle this banked_bytes += current_bytes previous_bytes = current_bytes else: # Standard case, just bank the difference between current and last banked_bytes += current_bytes - previous_bytes previous_bytes = current_bytes # Cache the new values self.cache_result( textkey + ":current_bytes", interface, current_bytes, replace=True ) self.cache_result( textkey + ":banked_bytes", interface, banked_bytes, replace=True ) self.cache_result( textkey + ":current_month", interface, current_month, replace=True ) self.cache_result( textkey + ":current_year", interface, current_year, replace=True ) return banked_bytes / 1024 # Because of AIX's interface naming convention, every # command provides a different interface name, we need to ignore that if "aix" in sys.platform: pass else: if not interface_found: 
self.log.warning("interface %s not found!?" % interface) return None # try to get the old cache cache = self.get_cache_results(textkey, interface) # cache our result always if textkey == "bandwidth.kbytes.in": result = kbytes_read elif textkey == "bandwidth.kbytes.out": result = kbytes_written elif textkey == "bandwidth.packets.in": result = packets_read elif textkey == "bandwidth.packets.out": result = packets_written else: self.log.error("UNKNOWN BANDWIDTH TEXTKEY- %s" % textkey) self.cache_result(textkey, interface, result) if not cache: return 0.0 delta, cached_result = cache[0] self.log.debug("kbytes read: %d" % kbytes_read) self.log.debug("kbytes written: %d" % kbytes_written) self.log.debug("packets read: %d" % packets_read) self.log.debug("packets written: %d" % packets_written) new_value = result - cached_result if new_value < 0.0: self.log.warning( "Metric {} current {} less than cached {} - maybe reboot or counter overflow".format( textkey, result, cached_result ) ) return None return new_value / float(delta) cassandra.py000064400000022722151700142040007054 0ustar00import agent_util import csv import os import sys if sys.version[0] == "3": from io import StringIO else: from StringIO import StringIO from agent_util import float textkey_mapping = {} def get_cmd_location(config): program_location = "" if "cassandra_installed_location" in config: cassandra_installed_location = config["cassandra_installed_location"] program_location = os.path.join(cassandra_installed_location, "bin") cmd = os.path.join(program_location, "nodetool") if not os.path.isfile(cmd): cmd = agent_util.which("nodetool") else: cmd = agent_util.which("nodetool", exc=True) return cmd def name_to_textkey(name): textkey = name.strip() textkey = textkey.replace(" ", "_") textkey = textkey.replace("(", "").replace(")", "") textkey_mapping[textkey] = name return textkey def get_cfstats(config, keyspace=None, table=None): cmd = get_cmd_location(config) cmd += " cfstats" if keyspace and table: 
cmd += " %s.%s" % (keyspace, table) status, output = agent_util.execute_command(cmd) if status != 0: raise Exception(output) lines = output.split("\n") data = {} current_keyspace = None current_table = None for line in lines: if line.startswith("Keyspace:"): current_keyspace = line.strip().split()[-1] current_table = None data[current_keyspace] = {} data[current_keyspace]["tables"] = {} continue if line.strip().startswith("Table:"): current_table = line.strip().split()[-1] data[current_keyspace]["tables"][current_table] = {} continue # Skip empty lines and "-------" divider lines if not line.strip(): continue if not line.replace("-", "").strip(): continue # Get the key/value pair on the line, get rid of "ms" and convert to a float. Use # try/except to convert NaN to None fields = line.strip().split(":") key = fields[0] value = fields[1] key = name_to_textkey(key) value = value.strip() value = value.replace("ms", "").strip() try: if value.lower() == "nan": raise Exception() value = float(value) except: value = None if current_table: data[current_keyspace]["tables"][current_table][key] = value else: data[current_keyspace][key] = value return data def get_cfhistograms(config, keyspace, table): cmd = get_cmd_location(config) cmd += " cfhistograms -- %s %s" % (keyspace, table) status, output = agent_util.execute_command(cmd) if status != 0: raise Exception(output) lines = output.split("\n") output = agent_util.StringIO(output) parsed_output = list(csv.reader(output, delimiter="\t")) indices = [i for i, x in enumerate(parsed_output) if x == []] cfhistograms_dict = dict( (parsed_output[i - 2][0], parsed_output[i - 1][0]) for i in indices if i - 2 >= 0 ) return cfhistograms_dict def get_tpstats(config): cmd = get_cmd_location(config) status, output = agent_util.execute_command(cmd + " tpstats") if status != 0: raise Exception(output) output = agent_util.StringIO(output) parsed_output = list(csv.reader(output, delimiter="\t")) header_line = [x.strip() for x in 
parsed_output[0][0].split(" ") if x] pool_tasks = [] dropped_message = [] header = [] for values in parsed_output[1:]: if values: v = [x.strip() for x in values[0].split(" ") if x] if len(v) == 2: if not header: header = [x.strip() for x in values[0].split(" ") if x] else: dropped_message.append(dict(zip(header, v))) else: pool_tasks.append(dict(zip(header_line, v))) return pool_tasks, dropped_message class CassandraPlugin(agent_util.Plugin): textkey = "cassandra" label = "Cassandra" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None program_location = "" if "cassandra_installed_location" in config: cassandra_installed_location = config["cassandra_installed_location"] program_location = os.path.join(cassandra_installed_location, "bin") # check if cassandra is even installed installed = agent_util.which("cassandra") if not installed: self.log.info("cassandra binary not found") status = agent_util.UNSUPPORTED msg = "cassandra binary not found" return {} tables = [] keyspaces = [] pool_tasks = [] dropped_message = [] if status is agent_util.SUPPORTED: try: cfstats = get_cfstats(config) keyspaces = cfstats.keys() for keyspace in cfstats: for table in cfstats[keyspace]["tables"]: tables.append(keyspace + "." + table) pool_tasks, dropped_message = get_tpstats(config) pool_tasks = [p["Pool Name"] for p in pool_tasks] dropped_message = [m["Message type"] for m in dropped_message] except: status = agent_util.MISCONFIGURED self.log.error("couldn't get cassandra stats") msg = "Couldn't get Cassandra stats, make sure cassandra is installed and cassandra configuration file is valid." 
tables.sort() keyspaces.sort() metadata = {} # Add keyspace-level metrics keyspace = keyspaces[0] for key in cfstats[keyspace]: if key != "tables": metadata[key] = { "label": textkey_mapping[key], "options": keyspaces, "status": status, "error_message": msg, } # Add table-level metrics table = cfstats[keyspace]["tables"].keys()[0] for key in cfstats[keyspace]["tables"][table].keys(): metadata[key] = { "label": textkey_mapping[key], "options": tables, "status": status, "error_message": msg, } """ metadata = { "thread_pool_task.active": { "label": "Thread pool task: active", "options": pool_tasks, "status": status, "error_message": msg }, "thread_pool_task.completed": { "label": "Thread pool task: completed", "options": pool_tasks, "status": status, "error_message": msg }, "thread_pool_task.blocked": { "label": "Thread pool task: blocked", "options": pool_tasks, "status": status, "error_message": msg }, "thread_pool_task.pending": { "label": "Thread pool task: pending", "options": pool_tasks, "status": status, "error_message": msg }, "dropped_messages": { "label": "Dropped Messages", "options": dropped_message, "status": status, "error_message": msg }, } """ return metadata def check(self, textkey, data, config): res = 0 if data: table = "" if len(data.split(".")) == 2: keyspace, table = data.split(".") else: keyspace = data if textkey in ["column_count", "row_size", "row_count"]: cfhistograms_dict = get_cfhistograms(config, keyspace, table) res = cfhistograms_dict[mapping[textkey]] if textkey == "column_count": try: res = res.split(" cells")[0] except: res = 0 else: if textkey == "row_size": try: res = res.split(" bytes: ")[0] except: res = 0 else: try: res = res.split(" bytes: ")[1] except: res = 0 elif "thread_pool_task" in textkey or textkey == "dropped_messages": pool_tasks, dropped_message = get_tpstats(config) if "thread_pool_task" in textkey: for p in pool_tasks: if p["Pool Name"] == data: res = p[textkey.split(".")[-1].title()] break else: for p in 
dropped_message: if p["Message type"] == data: res = p["Dropped"] break else: cfstats = get_cfstats(config, keyspace=keyspace, table=table) if keyspace in cfstats: if table: return cfstats[keyspace]["tables"][table].get(textkey) else: return cfstats[keyspace].get(textkey) # Shouldn't get here return None cert.py000064400000005617151700142040006056 0ustar00from subprocess import check_output, CalledProcessError import agent_util import logging import os from os import listdir from os.path import isfile, join import datetime """ Config file example: [cert] certpath = /path/to/the/certs """ class CertPlugin(agent_util.Plugin): textkey = "cert" label = "Cert Expiration Date" @classmethod def get_metadata(cls, config): try: certpath = config.get("certpath") if not os.path.exists(certpath): status = agent_util.UNSUPPORTED msg = "Missing cert directory in configuration" options = [] else: status = agent_util.SUPPORTED msg = [] options = CertPlugin.get_certs(certpath, config) except Exception: status = agent_util.UNSUPPORTED msg = "Exception gathering cert directory" options = [] metadata = { "days_to_expire": { "label": "Certificate Expiration Status", "options": options, "status": status, "error_message": msg, "unit": "days", }, } return metadata def check(self, textkey, data, config): self.log.debug("Checking cert expiration {} - {}".format(textkey, data)) day = CertPlugin._get_cert_data(config, data) return day @staticmethod def _get_cert_data(config, name): certpath = config.get("certpath") certs = CertPlugin.get_certs(certpath, config) for each in certs: if each != name: continue cert = certpath + "/" + each openssl = "openssl x509 -in " + cert + " -noout -enddate" try: ret, output = agent_util.execute_command(openssl) if ret != 0: logging.error(output) raise ValueError("Error Reading cert file") except ValueError: return False datestr = output.split("=")[-1] datespl = datestr.split("\n", 1)[0] dateformat = datetime.datetime.strptime(datespl, "%b %d %H:%M:%S %Y %Z") 
diff = dateformat - datetime.datetime.utcnow() day = diff.days return day @staticmethod def get_certs(certpath, config): # Collect all the .crt files in a list if not os.path.isdir(certpath): logging.error( "certpath specified on config file:" + certpath + " does not exist" ) else: certfiles = [ f for f in listdir(certpath) if isfile(join(certpath, f)) and f.endswith(".crt") ] logging.info( "Certificates found under" + str(certpath) + ":" + str(certfiles) ) return certfiles couch.py000064400000023073151700142040006216 0ustar00import agent_util try: # Python 2.x import httplib except: import http.client as httplib class CouchPlugin(agent_util.Plugin): textkey = "couch" label = "CouchDB" description = "Monitoring agent for CouchDB" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None response = None self.base_url = "/_stats" if not config: self.log.info("The [couch] config block is not found in the config file") return {} if "host" not in config or "port" not in config: msg = "The host and port settings were not found in the [couch] block of the agent config file." 
self.log.info(msg) status = agent_util.MISCONFIGURED if "base_url" in config: self.base_url = config["base_url"] if status == agent_util.SUPPORTED: try: couch_client = httplib.HTTPConnection(config["host"], config["port"]) couch_client.request("GET", "/") response = couch_client.getresponse() except Exception: import sys _, exception, _ = sys.exc_info() status = agent_util.MISCONFIGURED msg = "Unable to connect to CouchDB server to request metrics" self.log.info("%s" % exception) if response and response.status != 200: status = agent_util.MISCONFIGURED mgs = "CouchDB Stats not found at %s:%s" % ( config["host"], config["port"], ) self.log.info(msg) return { "couchdb.database_writes": { "label": "Number of times a database was changed", "options": None, "status": status, "error_message": msg, "unit": "times", }, "couchdb.database_reads": { "label": "Number of times a document was read from a database", "options": None, "status": status, "error_message": msg, "unit": "times", }, "couchdb.open_databases": { "label": "Number of open databases", "options": None, "status": status, "error_message": msg, "unit": "databases", }, "couchdb.open_os_files": { "label": "Number of file descriptors CouchDB has open", "options": None, "status": status, "error_message": msg, "unit": "files", }, "couchdb.request_time": { "label": "Length of a request inside CouchDB without MochiWeb", "options": None, "status": status, "error_message": msg, "unit": "ms", }, "httpd.bulk_requests": { "label": "Number of bulk requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd.requests": { "label": "Number of HTTP requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd.temporary_view_reads": { "label": "Number of temporary view reads", "options": None, "status": status, "error_message": msg, "unit": "reads", }, "httpd.view_reads": { "label": "Number of view reads", "options": None, "status": status, "error_message": 
msg, "unit": "reads", }, "httpd_request_methods.COPY": { "label": "Number of HTTP COPY requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.DELETE": { "label": "Number of HTTP DELETE requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.GET": { "label": "Number of HTTP GET requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.HEAD": { "label": "Number of HTTP HEAD requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.MOVE": { "label": "Number of HTTP MOVE requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.POST": { "label": "Number of HTTP POST requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_request_methods.PUT": { "label": "Number of HTTP PUT requests", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "httpd_status_codes.200": { "label": "Number of HTTP 200 OK responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.201": { "label": "Number of HTTP 201 Created responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.202": { "label": "Number of HTTP 202 Accepted responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.301": { "label": "Number of HTTP 301 Moved Permanently responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.304": { "label": "Number of HTTP 304 Not Modified responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.400": { "label": "Number of HTTP 400 Bad Request responses", "options": None, 
"status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.401": { "label": "Number of HTTP 401 Unauthorized responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.403": { "label": "Number of HTTP 403 Forbidden responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.404": { "label": "Number of HTTP 404 Not Found responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.405": { "label": "Number of HTTP 405 Method Not Allowed responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.409": { "label": "Number of HTTP 409 Conflict responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.412": { "label": "Number of HTTP 412 Precondition Failed responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, "httpd_status_codes.500": { "label": "Number of HTTP 500 Internal Server Error responses", "options": None, "status": status, "error_message": msg, "unit": "responses", }, } def check(self, textkey, data, config): stat_area, stat_name = textkey.split(".") url = "/".join([self.base_url, stat_area, stat_name]) + "?range=60" try: couch_client = httplib.HTTPConnection(config["host"], config["port"]) couch_client.request("GET", url) except Exception: return None response = couch_client.getresponse() stat = agent_util.json_loads(response.read()) return stat[stat_area][stat_name]["current"] cpu_usage.py000064400000100136151700142040007064 0ustar00import agent_util import time import sys import platform import os import socket from agent_util import float try: import psutil except: psutil = None try: import distro except: distro = None def search_esxtop(headers, search_string): for idx, column in enumerate(headers): if search_string in column: 
return idx return None def get_cpu_metrics(cls): retcode, output = agent_util.execute_command("cat /proc/stat") cls.log.debug("cat /proc/stat output: %s" % str(output)) output = output.splitlines() stat_fields = [ "user", "nice", "system", "idle", "iowait", "irq", "softirq", "steal", "guest", "guest_nice", ] cpus = {} for line in output: if not line.startswith("cpu"): continue # python3 compatible lambda function if sys.version_info[0] == 3: parts = list(filter(lambda p: p, line.split(" "))) else: parts = filter(lambda p: p, line.split(" ")) core = parts[0] if core == "cpu": core = "Total" if len(parts) >= 11: user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice = ( map(int, parts[1:11]) ) cpus[core] = { "user": user, "nice": nice, "system": system, "idle": idle, "iowait": iowait, "irq": irq, "softirq": softirq, "steal": steal, "guest": guest, "guest_nice": guest_nice, } elif len(parts) > 8 and len(parts) < 11: user, nice, system, idle, iowait, irq, softirq = map(int, parts[1:8]) cpus[core] = { "user": user, "nice": nice, "system": system, "idle": idle, "iowait": iowait, "irq": irq, "softirq": softirq, } return cpus class CPUUsagePlugin(agent_util.Plugin): textkey = "cpu_usage" label = "CPU" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if "aix" in sys.platform: status = agent_util.SUPPORTED data = { "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Usage percentage", "options": sorted(get_cpu_metrics(self).keys()), "status": status, "error_message": msg, "unit": "percent", }, "user_usage_percentage": { "label": "User usage 
percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "system_usage_percentage": { "label": "System usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "iowait_usage_percentage": { "label": "I/O Wait usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "cpu_entitlement_percentage": { "label": "CPU entitlement percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, } return data elif "sunos" in sys.platform: status = agent_util.SUPPORTED data = { "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Usage percentage", "options": sorted(get_cpu_metrics(self).keys()), "status": status, "error_message": msg, "unit": "percent", }, "user_usage_percentage": { "label": "User usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "system_usage_percentage": { "label": "System usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "iowait_usage_percentage": { "label": "I/O Wait usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, } return data elif "freebsd" in sys.platform or "darwin" in sys.platform: status = agent_util.SUPPORTED data = 
{ "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Usage percentage", "options": ["Total"], "status": status, "error_message": msg, "unit": "percent", }, "user_usage_percentage": { "label": "User usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "system_usage_percentage": { "label": "System usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, } return data elif "hp-ux" in sys.platform: status = agent_util.SUPPORTED metadata = { "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Total Usage percentage", "options": ["Total"], "status": status, "error_message": msg, "unit": "percent", }, "user_usage_percentage": { "label": "User usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "system_usage_percentage": { "label": "System usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": None, "status": status, "error_message": msg, 
"unit": "percent", }, } return metadata elif "vmware" in sys.platform: status = agent_util.SUPPORTED # here we're gathering the CPU cores that we can monitor and adding in a Total aggregation cpus = [] ret, out = agent_util.execute_command( 'esxcli hardware cpu list | grep "CPU:"' ) tmp_cpus = [x for x in out.split("\n") if x != ""] for c in tmp_cpus: cpu = "Cpu (%s)" % c.split(":")[1] cpus.append(cpu) cpus.append("Total") data = { "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Usage percentage", "options": cpus, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": cpus, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, "unit": "percent", }, } return data else: if psutil is None: # Unable to import psutil self.log.info( "Unable to import psutil library, no process metrics available" ) status = agent_util.UNSUPPORTED msg = "Unable to import psutil library, please install and rebuild metadata" # Core Linux if not agent_util.which("top", exc=False): self.log.info("top binary not found") status = agent_util.UNSUPPORTED msg = "top binary not found" try: distro_info = platform.dist() except AttributeError: if distro: distro_info = distro.linux_distribution() distro_info = ". ".join(distro_info) else: raise ValueError( "Unable to grab distribution information. Please verify dependencies. 
Distro for Python3.8" ) if ( "centos" in distro_info or "redhat" in distro_info or "oracle" in distro_info ) and not agent_util.which("iostat", exc=False): self.log.info("Missing sysstat package.") status = agent_util.UNSUPPORTED msg = "iostat/sysstat binary not found. Please install" metadata = { "load_average.1": { "label": "1 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.5": { "label": "5 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "load_average.15": { "label": "15 minute CPU load average", "options": None, "status": status, "error_message": msg, "unit": "avg", }, "usage_percentage": { "label": "Usage percentage", "options": sorted(get_cpu_metrics(self).keys()), "status": status, "error_message": msg, "unit": "percent", }, "user_usage_percentage": { "label": "User usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "system_usage_percentage": { "label": "System usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "idle_usage_percentage": { "label": "Idle usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "iowait_usage_percentage": { "label": "I/O Wait usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "irq_usage_percentage": { "label": "Hardware IRQ usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "softirg_usage_percentage": { "label": "Software IRQ usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "stealtime_usage_percentage": { "label": "Steal Time usage percentage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "nice_usage_percentage": { "label": "Nice usage percentage", "options": None, "status": status, "error_message": msg, 
"unit": "percent", }, } return metadata def check(self, textkey, data, config={}): # AIX-specific logic if ( "aix" in sys.platform or "darwin" in sys.platform or "freebsd" in sys.platform ): if textkey.startswith("load_average"): retcode, load = agent_util.execute_command("uptime") fields = load.strip().split() if textkey == "load_average.1": return float(fields[-3].strip(",")) elif textkey == "load_average.5": return float(fields[-2].strip(",")) elif textkey == "load_average.15": return float(fields[-1]) else: return None else: iostat = str(agent_util.which("iostat")) if "aix" in sys.platform: retcode, output = agent_util.execute_command( iostat + " | grep -p tty" ) if "darwin" in sys.platform or "freebsd" in sys.platform: retcode, output = agent_util.execute_command( iostat + " -C -c 2 | tail -1" ) output = output.strip().split("\n") self.log.debug("iostat output: %s" % output) iostatline = False enti = False entc = 0 inuse = 0 user = 0 system = 0 idle = 0 iowait = 0 for line in output: if line.startswith("tty"): iostatline = True if "entc" in line.split()[-1]: enti = True continue fields = line.split() if "darwin" in sys.platform: user = float(fields[-6]) system = float(fields[-5]) idle = float(fields[-4]) elif "freebsd" in sys.platform: user = float(-5) idle = float(fields[-1]) system = float(fields[-3]) else: user = float(fields[2]) system = float(fields[3]) idle = float(fields[4]) iowait = float(fields[5]) if enti == True: entc = float(fields[-1]) inuse = 100.0 - idle if textkey == "usage_percentage": return inuse elif textkey == "user_usage_percentage": return user elif textkey == "system_usage_percentage": return system elif textkey == "idle_usage_percentage": return idle elif textkey == "iowait_usage_percentage": return iowait elif textkey == "cpu_entitlement_percentage" and enti == True: return entc # If we got here, we don't know how to gather this metric # for AIX - return None return None elif "sunos" in sys.platform: if 
textkey.startswith("load_average"): retcode, load = agent_util.execute_command("uptime") fields = load.strip().split() if textkey == "load_average.1": return float(fields[-3].strip(",")) elif textkey == "load_average.5": return float(fields[-2].strip(",")) elif textkey == "load_average.15": return float(fields[-1]) else: return None retcode, output = agent_util.execute_command("mpstat") output = output.split("\n") for line in output: if "CPU" in line or not line: continue fields = line.split() if textkey == "usage_percentage": return 100.0 - float(fields[-1]) elif textkey == "user_usage_percentage": return float(fields[-4]) elif textkey == "system_usage_percentage": return float(fields[-3]) elif textkey == "idle_usage_percentage": return float(fields[-1]) elif textkey == "iowait_usage_percentage": return float(fields[-2]) # If we got here we don't know how to gather this metric for Solaris return None elif "vmware" in sys.platform: hostname = socket.gethostname() search_string = "\\\\%s\\Physical " % hostname metric_value = None # actually gather the data to parse ret, out = agent_util.execute_command( "esxtop -b -n 2 -d 2", cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) out_list = out.split("\n") headers = out_list[0].replace('"', "").split(",") esxtop_data = [] for idx, val in enumerate(out_list[::1]): if not val or val == "": continue esxtop_data = out_list[idx].replace('"', "").split(",") # finish building search string if textkey.startswith("load_average"): search_string += ( "Cpu Load\\Cpu Load (%s Minute Avg)" % textkey.split(".")[-1] ) elif data and ( textkey == "usage_percentage" or textkey == "idle_usage_percentage" ): if data == "Total": search_string += "Cpu(_Total)" else: search_string += data search_string += "\\% Processor Time" # find index from headers and match to esxtop_data collected search_idx = search_esxtop(headers, search_string) if not search_idx: self.log.error("Unable to parse ESXTOP output for %s" % search_string) return None if 
textkey == "idle_usage_percentage": metric_value = 100 - float(esxtop_data[search_idx]) else: metric_value = float(esxtop_data[search_idx]) return metric_value elif "hp-ux" in sys.platform: # add terminal specification for hpux os.environ["TERM"] = "xterm" # !!! applicable to HP-UX 11.31 !!! ret, out = agent_util.execute_command("top -s2 -d2", env=os.environ) top = out.strip().splitlines() self.log.debug(top) metric_mapping = {} cpu_str = "" load_str = "" for line in top: if line.lower().startswith("avg"): cpu_str = line elif line.lower().startswith("load averages"): load_str = line cpu = cpu_str.replace("%", "").split() self.log.debug(cpu) metric_mapping["user_usage_percentage"] = float(cpu[2]) metric_mapping["system_usage_percentage"] = float(cpu[4]) metric_mapping["idle_usage_percentage"] = float(cpu[5]) metric_mapping["usage_percentage"] = ( 100.0 - metric_mapping["idle_usage_percentage"] ) load = load_str.strip().replace(",", "").split() self.log.debug(load) self.log.debug("'%s'" % load[4][:4]) metric_mapping["load_average.1"] = float(load[2]) metric_mapping["load_average.5"] = float(load[3]) metric_mapping["load_average.15"] = float(load[4][:4]) return float(metric_mapping.get(textkey, None)) else: if psutil is None: self.log.error("PSUTIL PACKAGE MISSING! 
UNABLE TO COLLECT CPU METRICS") return None # Default Linux/FreeBSD logic if textkey.startswith("load_average"): retcode, output = agent_util.execute_command("top -b -n 2 -d 0.5") if config.get("debug", False): self.log.debug( "#####################################################" ) self.log.debug("CPU usage command 'top -b -n 2 -d 0.5:") self.log.debug(str(output)) self.log.debug( "#####################################################" ) self.log.debug("top -b -n 2 -d 0.5: %s" % str(output)) output = output.splitlines() space_index = [0] for var, item in enumerate(output): if item == "": space_index.append(var) tmp_out = [] for line in output[space_index[2] :]: if line.strip(): tmp_out.append(line) output = tmp_out if textkey.startswith("load_average"): fields = output[0].split() if textkey == "load_average.1": index = -3 elif textkey == "load_average.5": index = -2 elif textkey == "load_average.15": index = -1 return float(fields[index].strip(",")) elif textkey.endswith("usage_percentage") and textkey != "usage_percentage": num_cores = psutil.cpu_count() usage_textkey_map = { "user_usage_percentage": "user", "system_usage_percentage": "system", "idle_usage_percentage": "idle", "iowait_usage_percentage": "iowait", "irq_usage_percentage": "irq", "softirg_usage_percentage": "softirq", "stealtime_usage_percentage": "steal", "nice_usage_percentage": "nice", } key_name = usage_textkey_map.get(textkey, None) if key_name is None: self.log.error("Unknown resource textkey '%s'!" 
% textkey) return None c = self.get_cache_results("psutil", "detailed_cpu_usage") self.log.debug("Retrieved cached value:\n%s" % c) cur_cpu = psutil.cpu_times() self.log.debug( "Retrieved instant value:\n%s" % getattr(cur_cpu, key_name) ) last_cpu = c and c[0][1] or None self.cache_result("psutil", "detailed_cpu_usage", cur_cpu, replace=True) if last_cpu is None: return None use_diff = ( getattr(cur_cpu, key_name) - getattr(last_cpu, key_name) ) / num_cores if use_diff < 0: # The system was likely rebooted, and the cached # CPU stats are no longer relevant. # Cache new values and exit without reporting a value. return None elapsed = c[0][0] usage_time = (use_diff / elapsed) * 100.0 return usage_time elif textkey == "usage_percentage" and data.lower() == "total": num_cores = psutil.cpu_count() c = self.get_cache_results("psutil", "total_cpu_usage") self.log.debug("Retrieved cached value:\n%s" % c) cur_cpu = psutil.cpu_times() self.log.debug("Retrieved instant value:\n%s" % cur_cpu.idle) last_cpu = c and c[0][1] or None self.cache_result("psutil", "total_cpu_usage", cur_cpu, replace=True) if last_cpu is None: return None idle_diff = (cur_cpu.idle - last_cpu.idle) / num_cores steal_diff = (cur_cpu.steal - last_cpu.steal) / num_cores if idle_diff < 0 or steal_diff < 0: # The system was likely rebooted, and the cached # CPU stats are no longer relevant. # Cache new values and exit without reporting a value. return None use_diff = idle_diff + steal_diff # Instead of using the time between cached calculate the exact time # between measures by substracting the sum of the current clock time vs # the previous clock time. 
This avoid issues where our usage was too small # and the seconds of the extra cache would give a negative result elapsed = (sum(cur_cpu) - sum(last_cpu)) / float(num_cores) usage_time = 100 - ((use_diff / elapsed) * 100.0) return usage_time elif textkey == "usage_percentage" and data.lower() != "total": self.log.debug("Checking for core %s" % data) num_cores = psutil.cpu_count() c = self.get_cache_results("psutil", "%s_cpu_usage" % data) self.log.debug("Retrieved cached value:\n%s" % c) try: cur_cpu = psutil.cpu_times(percpu=True)[int(str(data).strip("cpu"))] except IndexError: self.log.critical("UNABLE TO FIND CPU #%s" % data) return None self.log.debug("Retrieved instant value:\n%s" % cur_cpu.idle) last_cpu = c and c[0][1] or None self.cache_result( "psutil", "%s_cpu_usage" % data, cur_cpu, replace=True ) if last_cpu is None: return None idle_diff = cur_cpu.idle - last_cpu.idle steal_diff = cur_cpu.steal - last_cpu.steal if idle_diff < 0 or steal_diff < 0: # The system was likely rebooted, and the cached # CPU stats are no longer relevant. # Cache new values and exit without reporting a value. 
return None use_diff = idle_diff + steal_diff elapsed = sum(cur_cpu) - sum(last_cpu) usage_time = 100 - ((use_diff / elapsed) * 100.0) return usage_time return 0 dem_plugin.py000064400000016760151700142040007245 0ustar00import agent_util import ipc_client from datetime import datetime import json import logging import sys class DEMPlugin(agent_util.Plugin): textkey = "dem" label = "Digital Experience" log = logging.getLogger(__name__) wifi_client = ipc_client.DEMClient(dem_port="demservice") @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None dem_configured = True if config.get("enabled", "false").lower() != "true": status = agent_util.UNSUPPORTED msg = "DEM not configured" dem_configured = False if dem_configured: info = self.wifi_client.get_dem_wifi_info() if not info: status = agent_util.UNSUPPORTED msg = "No wifi info found" metadata = { "dem.agrCtlRSSI": { "label": "Wifi signal strength", "options": None, "status": status, "error_msg": msg, "unit": "dBm", }, "dem.downloadspeed": { "label": "Download Speed", "options": None, "status": agent_util.SUPPORTED, "error_msg": None, "unit": "Mbit/s", }, "dem.lastTxRate": { "label": "Last transmission rate", "options": None, "status": status, "error_msg": msg, "unit": "mbps/s", }, } if "darwin" == sys.platform: metadata["dem.agrCtlNoise"] = { "label": "Noise", "options": None, "status": status, "error_msg": msg, "unit": "dBm", } metadata["dem.MCS"] = { "label": "MCS Index", "options": None, "status": status, "error_msg": msg, "unit": "index", } speedtest_upload = agent_util.SUPPORTED speedtest_msg = None if ( config.get("speedtest_mode", "speedtest").lower() != "iperf3" or not dem_configured or "darwin" != sys.platform ): speedtest_upload = agent_util.UNSUPPORTED speedtest_msg = "Upload speed not supported" metadata["dem.uploadspeed"] = { "label": "Upload Speed", "options": None, "status": speedtest_upload, "error_msg": speedtest_msg, "unit": "Mbit/s", } battery_status = 
agent_util.SUPPORTED battery_msg = None if not dem_configured: battery_status = agent_util.UNSUPPORTED battery_msg = "DEM not configured" else: battery_pct = self.get_battery_remaining() if battery_pct is None: battery_status = agent_util.UNSUPPORTED battery_msg = "Battery metric unavailable" metadata["dem.battery_percent_remaining"] = { "label": "Battery Percent Remaining", "options": None, "status": battery_status, "error_msg": battery_msg, "unit": "percent", } return metadata def check(self, textkey, data, config): if config.get("enabled", "false").lower() != "true": return None if textkey in ("dem.downloadspeed", "dem.uploadspeed"): return self._measureNetworkSpeed(textkey, config) if "dem.battery_percent_remaining" == textkey: return self.get_battery_remaining() try: info = self.wifi_client.get_dem_wifi_info() if not info: raise Exception("No wifi info received") key = textkey[len("dem.") :] metric_value = info.get(key, None) if metric_value is None: raise Exception("Missing key {}".format(key)) return float(metric_value) except Exception as e: self.log.error("Wifi metrics error: {}".format(str(e))) return None def _run_iperf3_test(self, textkey, config): speedtest_bin = "/usr/local/FortiMonitor/agent/latest/bin/iperf3" try: start_url = config.get("iperf3_start_url", None) from iperf3 import Iperf3Runner # Old configuration included an start http request. No longer needed. 
start_url = start_url.removeprefix("https://").removesuffix("/start_iperf") runner = Iperf3Runner(iperf3_bin=speedtest_bin, url=start_url) result = None if "dem.downloadspeed" == textkey: result = runner.get_download_speed() elif "dem.uploadspeed" == textkey: result = runner.get_upload_speed() return float(result / (1000 * 1000)) except: self.log.exception("Iperf3 error:") def _measureNetworkSpeed(self, textkey, config): if config.get("speedtest_mode", "speedtest").lower() == "iperf3": return self._run_iperf3_test(textkey, config) if "dem.uploadspeed" == textkey: raise Exception("Service does not support upload speed") try: import speedtest start = datetime.now() s = speedtest.Speedtest() s.get_best_server() k = s.download() rv = float(k / 1000000) self.log.info( "Download speed {} in {:.2f}".format( rv, (datetime.now() - start).total_seconds() ) ) return rv except Exception as ste: self.log.error("Speedtest exception: {}".format(ste)) return None @classmethod def get_battery_remaining(self): if "darwin" == sys.platform: try: sp = agent_util.which("system_profiler") power_tk = "SPPowerDataType" output = agent_util.run_command([sp, "-json", power_tk]) data = json.loads(output) for d in data[power_tk]: if d.get("_name", "") == "spbattery_information": mv = d.get("sppower_battery_charge_info", None) if not mv: return None return float(mv["sppower_battery_state_of_charge"]) return None except Exception as e: self.log.exception(e) return None elif "linux" == sys.platform: return self.get_battery_percent_linux() else: return None @classmethod def get_battery_percent_linux(self): try: proc_args = ["upower", "-i", "/org/freedesktop/UPower/devices/battery_BAT0"] output = agent_util.run_command(proc_args).strip().splitlines() seen_battery = False seen_present = False percentage = None for line in output: line = line.strip() if "battery" == line: seen_battery = True elif "present:" in line: seen_present = True elif "percentage:" in line: percentage = 
line.split(":")[1].strip().rstrip("%") break if seen_battery and seen_present: return float(percentage) return None except Exception as e: self.log.exception(e) return None disk.py000064400000041465151700142040006054 0ustar00import agent_util import sys import os import platform from agent_util import float import json NETWORK_FS = ["ncpfs", "nfs", "ntfs", "smb", "vfat", "smb2", "cifs", "nfs4"] # Timeout after 10 seconds, so we don't get hung on remote filesystems TIMEOUT_LIMIT = 10 def get_findmnt_cmd(extra_args=""): timeout = "" if agent_util.which("timeout"): timeout = "timeout %s " % TIMEOUT_LIMIT return "%sfindmnt --fstab --df --bytes --raw --evaluate --all %s" % ( timeout, extra_args, ) def get_df_cmd(extra_arg=""): timeout = "" if agent_util.which("timeout"): timeout = "timeout %s " % TIMEOUT_LIMIT df_cmd = "df -PkT" if "vmware" in sys.platform: df_cmd = "df -kT" elif "sunos" in sys.platform: df_cmd = "df -kt" elif "darwin" in sys.platform or "aix" in sys.platform or "freebsd" in sys.platform: df_cmd = "df -Pk" return "%s%s %s" % (timeout, df_cmd, extra_arg) def get_idf_cmd(extra_arg=""): timeout = "" if agent_util.which("timeout"): timeout = "timeout %s " % TIMEOUT_LIMIT idf_cmd = "df -iPT" if "sunos" in sys.platform or "vmware" in sys.platform: idf_cmd = "df -iT" elif "aix" == sys.platform: idf_cmd = "df -ik" elif "darwin" == sys.platform or "freebsd" in sys.platform: idf_cmd = "df -Pik" return "%s%s %s" % (timeout, idf_cmd, extra_arg) class DiskDFParser: def __init__(self, log, config): self.log = log self.device_ignore_list = ( "tmpfs", "devtmpfs", "none", "proc", "swap", "devices", "cgroup", "/dev/loop", ) self.mountpoint_excludes = () cfg_device_list = config.get("device_ignore_list", None) if cfg_device_list is not None: self.device_ignore_list = self.parse_ignore_list(cfg_device_list) if "aix" in sys.platform or "sunos" in sys.platform: self.device_ignore_list = self.device_ignore_list + ( "/proc", "/swap", "/ahafs", ) if "darwin" == sys.platform: 
self.mountpoint_excludes = ("/Library/Developer/CoreSimulator/Volumes",) mpe = config.get("mountpoint_excludes", None) if mpe is not None: self.mountpoint_excludes = self.parse_ignore_list(mpe) def __str__(self): return "Disk df parser" def parse_ignore_list(self, device_list): try: dl_type = type(device_list) if type(tuple) == dl_type: return device_list if type("") == dl_type: if "(" in device_list and ")" in device_list: device_list_items = ( device_list.replace("(", "").replace(")", "").split(",") ) items = [d.strip().strip('"') for d in device_list_items] return tuple(items) except: self.log.error("Error parsing device list {}".format(device_list)) return () def parse_df_output(self, output): outlines = output.splitlines() headers = self.build_header_data(outlines[0]) df_table = {} for df_line in outlines[1:]: df_line = df_line.strip().split() mount_point = None mount_point_idx = headers.get("mounted on", None) if mount_point_idx: mount_point = " ".join(df_line[mount_point_idx:]) if not mount_point: self.log.warning("No mount point in {}".format(df_line)) continue df_table[mount_point] = {} for entry in headers.keys(): val = df_line[headers[entry]] if "mounted on" == entry: val = mount_point df_table[mount_point][entry] = val return df_table def build_header_data(self, header_line): hdr_idx = 0 headers = {} for hdr in header_line.split(): # # For lines that end with 'Mounted on' - skip the last split # hdr = hdr.lower() if "on" == hdr: continue if "mounted" == hdr: hdr = "mounted on" if hdr in ["iuse%", "%iused"]: hdr = "iuse_pct" elif hdr in ["available", "avail"]: hdr = "available" headers[hdr] = hdr_idx hdr_idx += 1 return headers def get_device_data(self, output, key_map, custom_network_fs): df_table = self.parse_df_output(output) devices = [] max_disk = {} for mountpoint_table in df_table.values(): try: device_key = key_map.get("device") device = mountpoint_table[device_key] fs_type_key = key_map.get("fs_type") filesystem = 
mountpoint_table.get(fs_type_key, "") skip_device = False for test_device in self.device_ignore_list: if device.startswith(test_device) or filesystem.startswith( test_device ): self.log.debug("Skipping metadata for device %s" % device) skip_device = True break if skip_device: continue mounted_key = key_map.get("mountpoint") mounted = mountpoint_table.get(mounted_key, None) if not mounted: continue skip_mp = False for mp in self.mountpoint_excludes: if mounted.startswith(mp): self.log.debug("Skipping mountpoint {}".format(mounted)) skip_mp = True break if skip_mp: continue desc = "%s mounted at %s" % (device, mounted) devices.append( { "device": device, "mountpoint": mounted, "filesystem": filesystem, "resource": desc, "is_network": filesystem in NETWORK_FS or filesystem in custom_network_fs, } ) available_key = key_map.get("available") available = mountpoint_table.get(available_key, None) if available is not None: max_disk[desc] = available except: self.log.error("Unable to parse df output") continue return devices, max_disk class DiskUsagePlugin(agent_util.Plugin): textkey = "disk" label = "Disk" darwin_fstype_excludes = "nullfs,nodev,devfs,autofs" # adding min for disk usage min_capacity = 0 if "AIX" in os.uname(): sys.platform = "aix" @classmethod def dump_disk_output(self, config, cmd, raw_output): if config.get("debug", False): self.log.debug("#####################################################") self.log.debug("Disk command '%s' output :" % cmd) self.log.debug(raw_output) self.log.debug("#####################################################") @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if not agent_util.which("df", exc=False): self.log.warning("df binary not found") status = agent_util.UNSUPPORTED if agent_util.SUPPORTED != status: return {} # See if there are custom DF flags specified in the config file extra_df_arg = self.gather_extra_df_arg(config=config) # See if the config file specifies to use findmnt to 
identify expected disks use_findmnt = config.get("use_findmnt") and agent_util.which("findmnt") extra_findmnt_arg = config.get("extra_findmnt_arg", "") custom_network_fs = config.get("network_fs", []) if custom_network_fs: custom_network_fs = custom_network_fs.split(",") table_keys = { "device": "filesystem", "fs_type": "type", "mountpoint": "mounted on", "available": "available", } if use_findmnt: table_keys = { "device": "source", "fs_type": "fstype", "mountpoint": "target", "available": "avail", } block_query = get_findmnt_cmd(extra_findmnt_arg) inode_query = block_query else: block_query = get_df_cmd(extra_df_arg) inode_query = get_idf_cmd(extra_df_arg) parser = DiskDFParser(self.log, config) ret_code, block_result = agent_util.execute_command( block_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if 0 != ret_code: devices = [] max_disk = {} msg = "Command exit status {}".format(ret_code) self.log.error("{} exit status {}".format(block_query, ret_code)) status = agent_util.UNSUPPORTED else: self.dump_disk_output(config, block_query, block_result) devices, max_disk = parser.get_device_data( block_result, table_keys, custom_network_fs ) inode_status = agent_util.SUPPORTED idevices = [] imax_disk = {} inode_status_msg = None if "sunos" in sys.platform or "hp-ux" in sys.platform or "aix" in sys.platform: inode_status = agent_util.UNSUPPORTED inode_status_msg = "Unsupported on this platform" else: ret_code, inode_result = agent_util.execute_command( inode_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if 0 != ret_code: inode_status_msg = "Command exit status {}".format(ret_code) self.log.error("{} exit status {}".format(inode_query, ret_code)) inode_status = agent_util.UNSUPPORTED else: if not use_findmnt: table_keys["available"] = "ifree" self.dump_disk_output(config, inode_query, inode_result) idevices, imax_disk = parser.get_device_data( inode_result, table_keys, custom_network_fs ) options_schema = { "device": "string", "mountpoint": "string", 
"filesystem": "string", "resource": "string", "is_network": "boolean", } data = { "usage.percent_used": { "label": "Percentage of disk used", "options": devices, "options_schema": options_schema, "status": status, "error_message": msg, "unit": "percent", "min_value": 0, "max_value": 100, }, "usage.kb_available": { "label": "Disk space available", "options": devices, "options_schema": options_schema, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_disk, }, "filesystem.mounted": { "label": "Filesystem mounted", "options": devices, "options_schema": options_schema, "status": status, "error_message": msg, }, "inode.percent_used": { "label": "Inodes percent used", "options": idevices, "options_schema": options_schema, "status": inode_status, "error_message": inode_status_msg, "unit": "percent", "min_value": 0, "max_value": 100, }, "inode.used": { "label": "Inode used", "options": idevices, "options_schema": options_schema, "status": inode_status, "error_message": inode_status_msg, "unit": "Inodes", "min_value": 0, "max_value": imax_disk, }, "inode.available": { "label": "Inodes Available", "options": idevices, "options_schema": options_schema, "status": inode_status, "error_message": inode_status_msg, "unit": "Inodes", "min_value": 0, "max_value": imax_disk, }, } # no inodes for vmware to_del = [] if "vmware" in sys.platform: for k in data.keys(): if "inode" in k: to_del.append(k) for d in to_del: del data[d] return data def collect_vmware(self, textkey, mounted): ret, output = agent_util.execute_command( "stat -f %s" % mounted, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) # make sure it's mounted first if ret != 0 and textkey != "filesystem.mounted": self.log.error("Unable to find disk %s, is it mounted?!" 
% mounted) self.log.error(output) return None elif ret != 0 and textkey == "filesystem.mounted": return 0 block_size = 0 metrics = {} for line in output.split("\n"): l = str(line).strip().lower() if l.startswith("file:") or l.startswith("id:"): continue elif l.startswith("block size:"): block_size = l.split()[-1] if l.startswith("blocks:"): try: btext, ttext, total_size, ftext, free_size, atext, avail_size = ( l.split() ) except: self.log.error("Unable to parse disk output!") self.log.error(output) return None metrics["usage.percent_used"] = 100.0 - ( (float(free_size) / float(total_size)) * 100 ) metrics["usage.kb_available"] = float(free_size) * float(block_size) return metrics[str(textkey)] @classmethod def gather_extra_df_arg(self, config): extra_df_arg = config.get("extra_df_arg", "") if "darwin" in sys.platform: configKey = "ignore_fstypes" ignores = self.darwin_fstype_excludes if config.get(configKey, None): ignores = "{},{}".format(ignores, config.get(configKey)) extra_df_arg = "{} -T no{}".format(extra_df_arg, ignores) return extra_df_arg def check(self, textkey, dev_mount, config): dev_mount = dev_mount.split() mounted = " ".join(dev_mount[3:]) extra_df_arg = self.gather_extra_df_arg(config) if "vmware" in sys.platform: return self.collect_vmware(textkey, mounted) is_inode_query = False if textkey.startswith("i"): df_cmd = get_idf_cmd(extra_df_arg) is_inode_query = True else: df_cmd = get_df_cmd(extra_df_arg) rc, output = agent_util.execute_command( df_cmd, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if 0 != rc: return None self.log.debug("%s output: %s" % (df_cmd, output)) parser = DiskDFParser(self.log, config) df_data = parser.parse_df_output(output) mountpoint_data = df_data.get(mounted, None) if not mountpoint_data: self.log.error("Mountpoint %r not found" % mounted) if textkey == "filesystem.mounted": return False return None def convert_capacity_field(capacity): if capacity is None: return None if capacity == "-": return 0 else: return 
int(capacity.rstrip("%")) if "filesystem.mounted" == textkey: return True if textkey in ["usage.percent_used", "inode.percent_used"]: key = "capacity" if is_inode_query: key = "iuse_pct" return convert_capacity_field(mountpoint_data.get(key, None)) key = None if "inode.used" == textkey: key = "iused" elif "inode.available" == textkey: key = "ifree" elif "usage.kb_available" == textkey: key = "available" if not key: return None mv = mountpoint_data.get(key, None) if mv is None: return None if "-" == mv: return 0 return int(mv) docker.py000064400000115721151700142040006366 0ustar00import logging import os import sys try: import psutil except: psutil = None import math import re import agent_util DOCKER_SOCKET = "/var/run/docker.sock" NANOSECONDS = 1000000000 CLOCK_TICKS = 100 class DockerPlugin(agent_util.Plugin): textkey = "docker" label = "Docker" ######################################################### # Metadata # ######################################################### @classmethod def is_cgroups_v2(self): return os.path.isfile("/sys/fs/cgroup/cgroup.controllers") @classmethod def map_cgroup_v2_io_textkey_to_metric(self, textkey): textkey_to_metric = { "io.bytes_read": "rbytes", "io.bytes_written": "wbytes", "io.read_ops": "rios", "io.write_ops": "wios", } return textkey_to_metric.get(textkey, None) @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED if not agent_util.which("docker"): self.log.info("docker not present") status = agent_util.UNSUPPORTED msg = "Docker binary not found on instance" return {} return { "containers.num_running": { "label": "Number of containers running", "options": None, "status": status, "error_message": "", "unit": "count", }, "containers.num_running_img": { "label": "Number of containers running image", "options": None, "option_string": True, "status": status, "error_message": "", "unit": "count", }, "containers.num_running_name": { "label": "Number of containers running by name", "options": None, 
"option_string": True, "status": status, "error_message": "", "unit": "count", }, } @classmethod def get_metadata_docker(self, container, config): status = agent_util.SUPPORTED msg = None metadata = {} metadata.update(self.get_cpu_metadata(container, config)) metadata.update(self.get_memory_metadata(container, config)) metadata.update(self.get_network_metadata(container, config)) metadata.update(self.get_io_metadata(container, config)) metadata.update( { # Container is running "status.running": { "label": "Container is Running", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "boolean", }, # Disk metrics are always available "disk.size_rw": { "label": "Size RW", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "disk.size_root_fs": { "label": "Size Root FS", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, } ) return metadata @classmethod def get_cpu_metadata(self, container, config): container_id = container["Id"] cpu_metadata = { "cpu.usage_percentage": { "label": "CPU Usage Percentage", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "%", }, "cpu.user_usage": { "label": "CPU Percent Used [User]", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "%", }, "cpu.sys_usage": { "label": "CPU Percent Used [System]", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "%", }, } if self.is_cgroups_v2(): stat_file = "/sys/fs/cgroup/system.slice/docker-{}.scope/cpu.stat".format( container_id ) stats = DockerPlugin.read_stats_from_file(stat_file) textkey_map = { "cpu.usage_percentage": "usage_usec", "cpu.user_usage": "user_usec", "cpu.sys_usage": "system_usec", } for key in cpu_metadata.keys(): if stats.get(textkey_map[key], None) is None: cpu_metadata[key]["error_message"] = ( "Cannot access docker stats file" ) cpu_metadata[key]["status"] = agent_util.UNSUPPORTED else: # map 
textkey to docker interface file used for metrics textkey_map = { "cpu.usage_percentage": "cpuacct.usage", "cpu.user_usage": "cpuacct.usage_user", "cpu.sys_usage": "cpuacct.usage_sys", } for textkey in cpu_metadata.keys(): file_name = textkey_map.get(textkey, None) if file_name is None: self.log.warning( "Docker CPU metadata: missing map key {}".format(textkey) ) continue fpath = "/sys/fs/cgroup/cpuacct/docker/{}/{}".format( container_id, file_name ) metric = DockerPlugin.read_single_stat_file(fpath) if metric is None: cpu_metadata[textkey]["status"] = agent_util.UNSUPPORTED cpu_metadata[textkey]["error_msg"] = "Can't access '{}'".format( file_name ) return cpu_metadata @classmethod def read_single_stat_file(self, file_name): try: with open(file_name, "r") as f: metric = f.read() return float(metric) except Exception: self.log.exception("Read stat file {} failure".format(file_name)) return None @classmethod def read_stats_from_file(self, file_name): try: with open(file_name, "r") as f: output = f.readlines() stats = {} for line in output: stat_type, num = line.split(" ") stats[stat_type] = float(num) return stats except Exception: self.log.exception("Read stats from {} failure".format(file_name)) return {} @classmethod def read_io_stats_v2(self, container): result = {} try: successes = 0 identifier = None container_stats = ( "/sys/fs/cgroup/system.slice/docker-{}.scope/io.stat".format(container) ) with open(container_stats, "r") as cf: line = cf.read() identifier = line.split()[0] metric_line_split = None with open("/sys/fs/cgroup/io.stat", "r") as sf: lines = sf.readlines() for line in lines: items = line.split(" ") if items[0] == identifier: result["identifier"] = items[0] metric_line_split = items break if metric_line_split is None: raise Exception("No identifier for container") else: for item in metric_line_split[1:]: metric_and_value = item.strip("\n").split("=") if 2 == len(metric_and_value): try: result[metric_and_value[0]] = int(metric_and_value[1]) except: 
pass except Exception: _, e = sys.exc_info()[:2] result["error_msg"] = str(e) return result @classmethod def get_memory_metadata(self, container, config): container_id = container["Id"] if self.is_cgroups_v2(): memory_metadata = { "memory.usage": { "label": "Memory Used", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "memory.mapped_file": { "label": "Memory Mapped File", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, } stats_file = ( "/sys/fs/cgroup/system.slice/docker-{}.scope/memory.stat".format( container_id ) ) metrics = DockerPlugin.read_stats_from_file(stats_file) if metrics.get("file_mapped", None) is None: memory_metadata["memory.mapped_file"]["error_message"] = ( "Cannot read {}".format(stats_file) ) memory_metadata["memory.mapped_file"]["status"] = agent_util.UNSUPPORTED memory_current = ( "/sys/fs/cgroup/system.slice/docker-{}.scope/memory.current".format( container_id ) ) metric = DockerPlugin.read_single_stat_file(memory_current) if metric is None: memory_metadata["memory.usage"]["error_message"] = ( "Cannot read {}".format(stats_file) ) memory_metadata["memory.usage"]["status"] = agent_util.UNSUPPORTEDev return memory_metadata memory_metadata = { "memory.usage": { "label": "Memory Used", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "memory.cache": { "label": "Memory Cached", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "memory.rss": { "label": "Memory RSS", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "memory.mapped_file": { "label": "Memory Mapped File", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, "memory.swap": { "label": "Swap Used", "options": None, "status": agent_util.SUPPORTED, "error_message": "", "unit": "bytes", }, } total_metric = self.read_single_stat_file( 
"/sys/fs/cgroup/memory/docker/%s/memory.usage_in_bytes" % container_id ) if total_metric is None: memory_metadata["memory.usage"]["status"] = agent_util.UNSUPPORTED memory_metadata["memory.usage"]["error_message"] = ( "Can't access 'memory.usage_in_bytes'" ) stats = self.read_stats_from_file( "/sys/fs/cgroup/memory/docker/%s/memory.stat" % container_id ) for key in memory_metadata.keys(): if "memory.usage" == key: continue metric_key = key.split(".")[1] if metric_key not in stats: memory_metadata[key]["status"] = agent_util.UNSUPPORTED memory_metadata[key]["error_msg"] = "Unable to read stats file" return memory_metadata @classmethod def find_physical_ethernet_interface(self): try: cmd = """ find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\n' """ rc, out = agent_util.execute_command(cmd) if 0 != rc: raise Exception("Non-zero rc") return out.strip("\n") except: return "eth0" @classmethod def get_network_metadata(self, container, config): container_id = container["Id"] status = agent_util.UNSUPPORTED msg = "" # Get the PID try: conn = agent_util.UnixHTTPConnection(DOCKER_SOCKET) conn.request( "GET", "/containers/%s/json" % container_id, headers={"Host": "localhost"}, ) r = conn.getresponse().read() j = agent_util.json_loads(r) container_pid = j["State"]["Pid"] except Exception: container_pid = None msg = "Can't get container's PID" if container_pid: phys_eth = "{}:".format(self.find_physical_ethernet_interface()) try: with open("/proc/%s/net/dev" % container_pid, "r") as f: output = f.readlines() eth0 = False for line in output: if line.lstrip().startswith(phys_eth): eth0 = True split = line.split() if len(split) == 17: status = agent_util.SUPPORTED else: msg = "Unexpected # of columns in /proc/<pid>/net/dev" break if not eth0: msg = "Can't find {} device on container".format(phys_eth) except Exception: msg = "Can't access /proc/<pid>/net/dev" return { "net.rx_bytes": { "label": "Bytes In Per Second", "options": None, "status": status, "error_message": 
msg, "unit": "bytes/sec", }, "net.rx_packets": { "label": "Packets In Per Second", "options": None, "status": status, "error_message": msg, "unit": "packets/sec", }, "net.rx_errs": { "label": "RX Errors Per Second", "options": None, "status": status, "error_message": msg, "unit": "errors/sec", }, "net.tx_bytes": { "label": "Bytes Out Per Second", "options": None, "status": status, "error_message": msg, "unit": "bytes/sec", }, "net.tx_packets": { "label": "Packets Out Per Second", "options": None, "status": status, "error_message": msg, "unit": "packets/sec", }, "net.tx_errs": { "label": "TX Errors Per Second", "options": None, "status": status, "error_message": msg, "unit": "errors/sec", }, } @classmethod def get_io_metadata(self, container, config): io_metadata = { "io.bytes_written": { "label": "Bytes Written Per Second", "options": None, "status": agent_util.UNSUPPORTED, "error_message": None, "unit": "bytes/s", }, "io.bytes_read": { "label": "Bytes Read Per Second", "options": None, "status": agent_util.UNSUPPORTED, "error_message": None, "unit": "bytes/s", }, "io.write_ops": { "label": "Writes Per Second", "options": None, "status": agent_util.UNSUPPORTED, "error_message": None, "unit": "w/s", }, "io.read_ops": { "label": "Reads Per Second", "options": None, "status": agent_util.UNSUPPORTED, "error_message": None, "unit": "r/s", }, } container_id = container["Id"] if self.is_cgroups_v2(): cgv2_status = agent_util.UNSUPPORTED cgv2_msg = "" io_stats_data = self.read_io_stats_v2(container_id) cgv2_msg = io_stats_data.get("eror_msg", None) if cgv2_msg is not None: for v in io_metadata.values(): v["status"] = cgv2_status v["error_message"] = cgv2_msg return io_metadata for textkey in io_metadata.keys(): metric_key = self.map_cgroup_v2_io_textkey_to_metric(textkey) if metric_key is None: io_metadata[textkey]["error_message"] = ( "Unknown metric from {}".format(textkey) ) continue if io_stats_data.get(metric_key, None) is not None: io_metadata[textkey]["status"] = 
agent_util.SUPPORTED else: io_metadata[textkey]["error_message"] = "Unable to read i/o stats" return io_metadata service_bytes = agent_util.UNSUPPORTED service_bytes_message = "" try: fpath = ( "/sys/fs/cgroup/blkio/docker/%s/blkio.throttle.io_service_bytes" % container_id ) with open(fpath, "r") as f: f.read() service_bytes = agent_util.SUPPORTED except Exception: service_bytes_message = "Can't access 'blkio.throttle.io_service_bytes'" for tk in ["io.bytes_written", "io.bytes_read"]: io_metadata[tk]["status"] = service_bytes io_metadata[tk]["error_message"] = service_bytes_message operations = agent_util.UNSUPPORTED operations_message = "" try: fpath = ( "/sys/fs/cgroup/blkio/docker/%s/blkio.throttle.io_serviced" % container_id ) with open(fpath, "r") as f: f.read() operations = agent_util.SUPPORTED except Exception: operations_message = "Can't access 'blkio.throttle.io_serviced'" for tk in ["io.write_ops", "io.read_ops"]: io_metadata[tk]["status"] = operations io_metadata[tk]["error_message"] = operations_message return io_metadata ######################################################### # Checks # ######################################################### def check(self, textkey, data, config): if textkey.startswith("containers."): return self.get_containers_metric(textkey, data, config) def get_containers_metric(self, textkey, data, config): def get_running_containers(): try: conn = agent_util.UnixHTTPConnection(DOCKER_SOCKET) conn.request("GET", "/containers/json", headers={"Host": "localhost"}) r = conn.getresponse().read() j = agent_util.json_loads(r) return j except Exception: self.log.exception("Get running containers error") return None if textkey == "containers.num_running": running = get_running_containers() return len(running) elif textkey == "containers.num_running_img": running = get_running_containers() search = data or "*" search = search.replace("*", ".*") search = search.replace('""', ".*") count = 0 for container in running: image = 
container.get("Image", "") if re.search(search, image): count += 1 return count elif textkey == "containers.num_running_name": running = get_running_containers() search = data or "*" search = search.replace("*", ".*") search = search.replace('""', ".*") count = 0 for container in running: names = container.get("Names", []) for name in names: if re.search(search, name): count += 1 return count def check_docker(self, container, textkey, data, config): if textkey.startswith("cpu."): return self.get_cpu_metric(container, textkey, data, config) elif textkey.startswith("memory."): return self.get_memory_metric(container, textkey, data, config) elif textkey.startswith("net."): return self.get_network_metric(container, textkey, data, config) elif textkey.startswith("disk."): return self.get_disk_metric(container, textkey, data, config) elif textkey.startswith("io."): return self.get_io_metric(container, textkey, data, config) elif textkey.startswith("status."): return self.get_status_metric(container, textkey, data, config) return None def _read_cpu_metric(self, textkey, container_id): if self.is_cgroups_v2(): stat_file = "/sys/fs/cgroup/system.slice/docker-{}.scope/cpu.stat".format( container_id ) stats = DockerPlugin.read_stats_from_file(stat_file) if "cpu.usage_percentage" == textkey: return stats.get("usage_usec", None) elif "cpu.user_usage" == textkey: return stats.get("user_usec", None) elif "cpu.sys_usage" == textkey: return stats.get("system_usec", None) self.log.warning( "Unrecognized textkey {} in _read_cpu_metric".format(textkey) ) return None stat_file = None base_path = "/sys/fs/cgroup/cpuacct/docker/{}".format(container_id) if "cpu.usage_percentage" == textkey: stat_file = os.path.join(base_path, "cpuacct.usage") elif "cpu.user_usage" == textkey: stat_file = os.path.join(base_path, "cpuacct.usage_user") elif "cpu.sys_usage" == textkey: stat_file = os.path.join(base_path, "cpuacct.usage_sys") if stat_file is None: self.log.error( "Unrecognized textkey {} in 
_read_cpu_metric".format(textkey) ) return None return DockerPlugin.read_single_stat_file(stat_file) def get_cpu_metric(self, container, textkey, data, config): container_id = container["Id"] def get_total_system(): cpu_times = psutil.cpu_times() total_system = 0 for key in ["user", "nice", "system", "idle", "iowait", "irq", "softirq"]: total_system += getattr(cpu_times, key) * 100 total_system = (total_system * NANOSECONDS) / CLOCK_TICKS if self.is_cgroups_v2(): total_system = total_system / 1000 return total_system if textkey == "cpu.usage_percentage": last_system = self.get_cache_results( "docker.cpu.usage_percentage", "total_system" ) if last_system: last_system = last_system[0][1] else: last_system = None last_container = self.get_cache_results( "docker.cpu.usage_percentage", container_id ) if last_container: last_container = last_container[0][1] else: last_container = None total_system = get_total_system() self.cache_result( "docker.cpu.usage_percentage", "total_system", total_system, replace=True, ) total_container = self._read_cpu_metric(textkey, container_id) if total_container is None: return None self.cache_result( "docker.cpu.usage_percentage", container_id, total_container, replace=True, ) if last_system is None or last_container is None: return None container_delta = total_container - last_container system_delta = total_system - last_system num_cpus = psutil.cpu_count() return (float(container_delta) / system_delta) * num_cpus * 100.0 elif textkey == "cpu.user_usage": last_system = self.get_cache_results( "docker.cpu.user_usage", "total_system" ) if last_system: last_system = last_system[0][1] else: last_system = None last_container = self.get_cache_results( "docker.cpu.user_usage", container_id ) if last_container: last_container = last_container[0][1] else: last_container = None total_system = get_total_system() self.cache_result( "docker.cpu.user_usage", "total_system", total_system, replace=True ) container_val = self._read_cpu_metric(textkey, 
container_id) if container_val is None: return None self.cache_result( "docker.cpu.user_usage", container_id, container_val, replace=True ) if last_system is None or last_container is None: return None container_delta = container_val - last_container system_delta = total_system - last_system num_cpus = psutil.cpu_count() return (float(container_delta) / system_delta) * num_cpus * 100.0 elif textkey == "cpu.sys_usage": last_system = self.get_cache_results("docker.cpu.sys_usage", "total_system") if last_system: last_system = last_system[0][1] else: last_system = None last_container = self.get_cache_results( "docker.cpu.sys_usage", container_id ) if last_container: last_container = last_container[0][1] else: last_container = None total_system = get_total_system() self.cache_result( "docker.cpu.sys_usage", "total_system", total_system, replace=True ) container_val = self._read_cpu_metric(textkey, container_id) if container_val is None: return None self.cache_result( "docker.cpu.sys_usage", container_id, container_val, replace=True ) if last_system is None or last_container is None: return None container_delta = container_val - last_container system_delta = total_system - last_system num_cpus = psutil.cpu_count() return (float(container_delta) / system_delta) * num_cpus * 100.0 def get_memory_metric(self, container, textkey, data, config): container_id = container["Id"] def get_total_bytes(): fname = ( "/sys/fs/cgroup/memory/docker/%s/memory.usage_in_bytes" % container_id ) if self.is_cgroups_v2(): fname = ( "/sys/fs/cgroup/system.slice/docker-{}.scope/memory.current".format( container_id ) ) return DockerPlugin.read_single_stat_file(fname) def get_memory_stats(): fname = "/sys/fs/cgroup/memory/docker/%s/memory.stat" % container_id if self.is_cgroups_v2(): fname = ( "/sys/fs/cgroup/system.slice/docker-{}.scope/memory.stat".format( container_id ) ) return DockerPlugin.read_stats_from_file(fname) if textkey == "memory.usage": try: total_bytes = get_total_bytes() if 
self.is_cgroups_v2(): return total_bytes memory_stats = get_memory_stats() return total_bytes - memory_stats["cache"] except Exception: self.log.exception("Docker get memory.usage error") return None elif textkey in [ "memory.cache", "memory.rss", "memory.mapped_file", "memory.swap", ]: try: memory_stats = get_memory_stats() if not self.is_cgroups_v2(): stat_type = textkey.split(".")[1] return memory_stats[stat_type] if "memory.mapped_file" == textkey: return memory_stats["file_mapped"] raise Exception("Unrecognized textkey {}".format(textkey)) except Exception: self.log.exception("Docker get {} error".format(textkey)) return None def get_container_pid(self, container): conn = None try: container_id = container["Id"] conn = agent_util.UnixHTTPConnection(DOCKER_SOCKET) conn.request( "GET", "/containers/%s/json" % container_id, headers={"Host": "localhost"}, ) r = conn.getresponse().read() j = agent_util.json_loads(r) return j["State"]["Pid"] except Exception: self.log.exception("Get container pid error") return None finally: try: conn.close() except: pass def get_network_metric(self, container, textkey, data, config): container_id = container["Id"] # Find the container's PID container_pid = self.get_container_pid(container) if container_pid is None: return None phys_eth = "{}:".format(DockerPlugin.find_physical_ethernet_interface()) def get_proc_stats(pid): proc_file = "/proc/%s/net/dev" % pid with open(proc_file, "r") as f: content = f.readlines() eth0_line = None for line in content: if line.lstrip().startswith(phys_eth): eth0_line = line break if not eth0_line: raise Exception("No line for {} in {}".format(phys_eth, proc_file)) eth0_line = eth0_line.split() keys = [ "", "rx_bytes", "rx_packets", "rx_errs", "", "", "", "", "", "", "tx_bytes", "tx_packets", "tx_errs", "", "", "", "", "", ] stats = {} for col, text in enumerate(eth0_line): key = keys[col] if key: stats[key] = int(text) return stats if textkey in [ "net.rx_bytes", "net.rx_packets", "net.rx_errs", 
"net.tx_bytes", "net.tx_packets", "net.tx_errs", ]: key = textkey.split(".")[1] last = self.get_cache_results("docker.net", key) if last: last_val = last[0][1] seconds = last[0][0] else: last_val = None seconds = None try: stats = get_proc_stats(container_pid) stat = stats[key] except Exception: self.log.exception( "Error accessing /proc/%s/net/dev: %s", container_pid, e ) return None self.cache_result("docker.net", key, stat, replace=True) if last_val is None: return None return (stat - last_val) / seconds def get_disk_metric(self, container, textkey, data, config): container_id = container["Id"] try: conn = agent_util.UnixHTTPConnection(DOCKER_SOCKET) conn.request( "GET", "/containers/%s/json?size=true" % container_id, headers={"Host": "localhost"}, ) r = conn.getresponse().read() j = agent_util.json_loads(r) except Exception: self.log.exception("Docker get disk metric error") return None if textkey == "disk.size_rw": return j.get("SizeRw", None) elif textkey == "disk.size_root_fs": return j.get("SizeRootFs", None) def get_metric_as_bytes(self, metric_string): try: index = 0 while True: if metric_string[index].isdigit() or "." == metric_string[index]: index += 1 continue break metric_value = float(metric_string[0:index]) units = metric_string[index:].lower() self.log.info( "metric_string {} -> {} {}".format(metric_string, metric_value, units) ) conversion = 1 if "kib" == units: conversion = 1000 elif "mib" == units: conversion = math.pow(1024, 2) elif "gib" == units: conversion = math.pow(1024, 3) elif "kb" == units: conversion = 1000 elif "mb" == units: conversion = math.pow(1000, 2) elif "gb" == units: conversion = math.pow(1000, 3) return metric_value * conversion except Exception: self.log.exception("get_metric_as_bytes error") return None def _get_docker_block_stats(self, container, textkey): """ Read the I/O metrics from docker stats, because the /proc io file is read-only to root. 
""" import json def parse_multi_metric_entry(metric_line): items = metric_line.split("/") items = metric_line.split("/") metrics = [item.strip() for item in items] rv = [] for metric in metrics: rv.append(self.get_metric_as_bytes(metric)) return rv try: container_id = container["Id"] rc, output = agent_util.execute_command( "docker stats {} --no-stream --format json".format(container_id), cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT, ) if 0 != rc: self.log.error("Docker stats failure: {}".format(rc)) return None data = agent_util.json_loads(output) self.log.debug("Docker Stats result: {}".format(json.dumps(data, indent=1))) mld = parse_multi_metric_entry(data["BlockIO"]) if 2 != len(mld): self.log.error("get_docker_block_stats error: Unexpected metric count") self.log.info(output) return None if "io.bytes_out" == textkey: return mld[1] elif "io.bytes_in" == textkey: return mld[0] else: return None except Exception: self.log.exception("get_docker_block_stats error") return None def get_io_metric(self, container, textkey, data, config): container_id = container["Id"] def get_total(fname, operation_type): with open(fname, "r") as f: lines = f.readlines() total = 0 for line in lines: if line.startswith("Total"): continue device, op_type, num = line.split(" ") if op_type == operation_type: total += int(num) return total key = textkey.split(".")[1] last = self.get_cache_results("docker.io", key) if last: last_val = last[0][1] seconds = last[0][0] else: last_val = None seconds = None new_val = None if self.is_cgroups_v2(): io_metrics = DockerPlugin.read_io_stats_v2(container_id) if io_metrics.get("error_msg", None) is not None: self.log.error("I/O stats error {}".format(io_metrics["error_msg"])) return None mtk = DockerPlugin.map_cgroup_v2_io_textkey_to_metric(textkey) if io_metrics.get(mtk, None) is None: self.log.error("No metric found for {}".format(textkey)) return None new_val = io_metrics[mtk] else: if textkey in ["io.bytes_written", "io.bytes_read"]: try: fname = 
( "/sys/fs/cgroup/blkio/docker/%s/blkio.throttle.io_service_bytes" % container_id ) if "written" in textkey: new_val = get_total(fname, "Write") elif "read" in textkey: new_val = get_total(fname, "Read") except Exception: self.log.error("Error accessing %s", fname) return None elif textkey in ["io.write_os", "io.read_ops"]: try: fname = ( "/sys/fs/cgroup/blkio/docker/%s/blkio.throttle.io_serviced" % container_id ) if "write" in textkey: new_val = get_total(fname, "Write") elif "read" in textkey: new_val = get_total(fname, "Read") except Exception: self.log.error("Error accessing %s", fname) return None if new_val is None: return None self.cache_result("docker.io", key, new_val, replace=True) if last_val is None: return None if new_val < last_val: return None return (new_val - last_val) / seconds def get_status_metric(self, container, textkey, data, config): if textkey == "status.running": if container.get("State") != "running": return 0 return 1 elasticsearch.py000064400000005635151700142040007733 0ustar00import agent_util class ElasticSearchPlugin(agent_util.Plugin): textkey = "elasticsearch" label = "Elastic Search" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if not config: self.log.info( "The [elasticsearch] config block not found in the config file" ) return {} if not "hostname" in config or not "port" in config: self.log.info( "The [elasticsearch] config block does not contain variables for hostname and/or port" ) return {} if not agent_util.which("curl", exc=False): self.log.info("curl not found!") status = agent_util.UNSUPPORTED msg = "Curl is not installed - please install" return {} options = [] data = { "number_of_nodes": { "label": "Total number of nodes in cluster", "options": None, "status": status, "error_message": msg, }, "number_of_data_nodes": { "label": "Number of data nodes in cluster", "options": None, "status": status, "error_message": msg, }, "active_primary_shards": { "label": "Number of active 
primary shards", "options": None, "status": status, "error_message": msg, }, "active_shards": { "label": "Total number of shards", "options": None, "status": status, "error_message": msg, }, "relocating_shards": { "label": "Number of shards getting relocated", "options": None, "status": status, "error_message": msg, }, "initializing_shards": { "label": "Number of initializing shards", "options": None, "status": status, "error_message": msg, }, "unassigned_shards": { "label": "Number of unassigned shards", "options": None, "status": status, "error_message": msg, }, } return data def check(self, textkey, data, config): user_string = "" if config.get("username") and config.get("password"): user_string = "--user %s:%s" % ( config.get("username"), config.get("password"), ) cmd = "curl %s %s:%s/_cluster/health" % ( user_string, config["hostname"], config["port"], ) ret, output = agent_util.execute_command(cmd) reply = agent_util.json_loads(output) return int(reply[textkey]) entropy.py000064400000001642151700142040006613 0ustar00import agent_util import os import re import sys from agent_util import float class EntropyPlugin(agent_util.Plugin): textkey = "entropy" label = "Entropy" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if not os.path.exists("/proc/sys/kernel/random/entropy_avail"): status = agent_util.UNSUPPORTED msg = "/proc/sys/kernel/random/entropy_avail does not exist." 
data = { "entropy.avail": { "label": "Entropy level", "options": None, "status": status, "error_message": msg, } } return data def check(self, textkey, data, config): if textkey == "entropy.avail": return float( open("/proc/sys/kernel/random/entropy_avail", "r").read().strip() ) return 0 exim.py000064400000002004151700142040006046 0ustar00import agent_util class EximPlugin(agent_util.Plugin): textkey = "exim" label = "Exim" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None exim_bin = agent_util.which("exim") if not exim_bin: self.log.info("couldn't find exim binary") status = agent_util.UNSUPPORTED msg = "Couldn't find exim binary" return {} data = { "queue_depth": { "label": "Exim queue depth", "options": None, "status": status, "error_message": msg, } } return data def check(self, textkey, data, config={}): exim_bin = agent_util.which("exim", exc=True) retcode, output = agent_util.execute_command("%s -bpc" % exim_bin) self.log.debug("exim -bpc output: %s" % str(output)) output = output.splitlines() fields = output[0].split() return int(fields[0]) file_presence.py000064400000012103151700142040007710 0ustar00import logging import agent_util import glob import os import datetime import re import time import sys from agent_util import float class FilePresencePlugin(agent_util.Plugin): textkey = "files" label = "Filesystem" # this is here because it's used in 2 different places newer_than_regex = re.compile( r"(?P<op>(a|c|m)time)<(?P<delta>\d+)(?P<metric>d|h|m|s)" ) @classmethod def get_metadata(self, config): data = { "file.exists": { "label": "File exists", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "boolean", "option_string": True, }, "file.count": { "label": "File count", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "files", "option_string": True, }, "file.created": { "label": "Creation age", "options": None, "status": agent_util.SUPPORTED, "error_message": 
None, "unit": "minutes", "option_string": True, }, "file.modified": { "label": "Modification age", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "minutes", "option_string": True, }, "directory.size": { "label": "Directory size (KB)", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "kilobytes", "option_string": True, }, "file.size": { "label": "File size (KB)", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "kilobytes", "option_string": True, }, } return data def check(self, textkey, path, config): path = path.strip() for date in re.findall(r"\$.[^$]+", path): date_format = ( date.replace("YYYY", "%Y") .replace("YY", "%y") .replace("MM", "%m") .replace("DD", "%d") .replace("D", "%w") .replace("ww", "%W") .replace("hh", "%H") .replace("mm", "%M") .replace("ss", "%S") .replace("Z", "%z") .replace("$", "") .strip() ) date_string = datetime.datetime.now().strftime(date_format) path = path.replace(date, date_string) self.log.debug("File path for file_presence plugin: %s" % path) if textkey == "file.exists": glob_results = glob.glob(path) has_results = len(glob_results) > 0 return_val = has_results self.log.debug("Return value for file.exists textkey: %s" % str(return_val)) return return_val elif textkey == "file.count": if os.path.isdir(path): glob_results = glob.glob(path + "/*") return_val = len(glob_results) self.log.debug( "Return value for file.count textkey: %s" % str(return_val) ) else: glob_results = glob.glob(path) return_val = len(glob_results) self.log.debug( "Return value for file.count textkey: %s" % str(return_val) ) return return_val elif textkey == "file.created": if not os.path.exists(path): return 0.0 age = time.time() - os.path.getctime(path) return_val = float(age) / 60.0 self.log.debug( "Return value for file.created textkey: %s" % str(return_val) ) return return_val elif textkey == "file.modified": if not os.path.exists(path): return 0.0 age = 
time.time() - os.path.getmtime(path) return_val = float(age) / 60.0 self.log.debug( "Return value for file.modified textkey: %s" % str(return_val) ) return return_val elif textkey == "file.size": if not os.path.exists(path): return 0.0 return_val = float(os.path.getsize(path)) / 1024.0 self.log.debug("Return value for file.size textkey: %s" % str(return_val)) return return_val elif textkey == "directory.size": if not os.path.exists(path): return 0.0 ret, output = agent_util.execute_command("du -sk " + path) return_val = float(output.split()[0]) self.log.debug("Return value for file.size textkey: %s" % str(return_val)) return return_val haproxy.py000064400000032722151700142040006610 0ustar00import agent_util import os from agent_util import float import sys ###################################3 # -*- coding: utf-8 -*- import logging import socket logger = logging.getLogger(__name__) class TimeoutException(Exception): pass ################################## def get_status_headers(config): # branch logic based on socket vs http interface if "socket_cfg_file" in config: buffer = "" client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) client.connect(config["socket_cfg_file"]) client.settimeout(2.0) out = "show stat" + "\n" client.send(out.encode("utf-8")) f = client.makefile() output = f.read().split("\n") client.close() if ( "stats_http_host" in config and "stats_http_port" in config and "stats_http_path" in config ): auth_string = "" if "stats_http_user" in config and "stats_http_password" in config: auth_string = "%s:%s@" % ( config["stats_http_user"], config["stats_http_password"], ) stats_endpoint = "http://%s%s:%s/%s?stats;csv;norefresh" % ( auth_string, config["stats_http_host"], config["stats_http_port"], config["stats_http_path"].replace("/", ""), ) ret, output = agent_util.execute_command("curl '%s'" % stats_endpoint) # extract the values from the output output = output.split("\n") header_line = output[0].split(",") metric_values = [] for values in 
output[1:]: if len(header_line) == len(values.split(",")): metric_values.append(dict(zip(header_line, values.split(",")))) return metric_values, output class HAProxyPlugin(agent_util.Plugin): textkey = "haproxy" label = "HAProxy" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if haproxy is installed installed = agent_util.which("haproxy") if not installed: msg = "haproxy binary not found" self.log.info(msg) status = agent_util.UNSUPPORTED return {} # if http interface is being used, check if curl is present if "stats_http_host" in config and not agent_util.which("curl", exc=False): self.log.info("curl not found!") status = agent_util.UNSUPPORTED msg = "Curl is not installed - please install" return {} # if sock interface is being used, check if we can access the file if "socket_cfg_file" in config: socket_cfg_file = config["socket_cfg_file"] if config.get("debug", False): self.log.debug("HAProxy socket file path %s" % socket_cfg_file) if not os.access(socket_cfg_file, os.W_OK): self.log.error( "Agent does not have permission to access the HAProxy socket file. Please adjust permissions." ) status = agent_util.MISCONFIGURED msg = "Agent does not have permission to access the HAProxy socket file. Please adjust permissions." 
return {} pxservers = set() svnames = set() try: metric_values = None output = None metric_values, output = get_status_headers(config) if config.get("debug", False): self.log.info("#####################################################") self.log.info("HAProxy command 'show stat' output:") self.log.info(output) self.log.info("#####################################################") for values in metric_values: pxname = values["# pxname"] svname = values["svname"] if svname == "BACKEND": # We skip the Backend checks continue pxservers.add(pxname + " " + svname) except Exception: status = agent_util.MISCONFIGURED self.log.exception("Couldn't get haproxy status") self.log.info("#####################################################") self.log.info("HAProxy command 'show stat' output:") self.log.info(output) self.log.info("#####################################################") msg = "Couldn't get haproxy status, make sure haproxy is running, haproxy configuration file is valid and the status module is enabled" pxservers = list(pxservers) pxservers.sort() backend_pxservers = list( filter(lambda x: "http_back" in x or "BACKEND" in x, pxservers) ) pxservers += ["Total Backend", "Total Frontend"] if status is agent_util.SUPPORTED and not pxservers: status = agent_util.MISCONFIGURED msg = "No Proxy servers found." 
metadata = { "qcur": { "label": "Current HAProxy service queued requests ", "options": pxservers, "status": status, "error_message": msg, }, "scur": { "label": "Current HAProxy service sessions", "options": pxservers, "status": status, "error_message": msg, }, "stot": { "label": "HAProxy service sessions", "options": pxservers, "status": status, "error_message": msg, }, "stot_rate": { "label": "HAProxy service session rate", "options": pxservers, "status": status, "error_message": msg, "unit": "sessions per second", }, "bin": { "label": "Bytes In of HAProxy service", "options": pxservers, "status": status, "error_message": msg, }, "bin_rate": { "label": "Rate of Bytes In of HAProxy service", "options": pxservers, "status": status, "error_message": msg, "unit": "B/s", }, "bout": { "label": "Bytes Out of HAProxy service", "options": pxservers, "status": status, "error_message": msg, }, "bout_rate": { "label": "Rate of Bytes Out of HAProxy service", "options": pxservers, "status": status, "error_message": msg, "unit": "B/s", }, "ereq": { "label": "HAProxy service error requests", "options": pxservers, "status": status, "error_message": msg, }, "eresp": { "label": "HAProxy service response errors", "options": pxservers, "status": status, "error_message": msg, }, "econ": { "label": "HAProxy service connection errors", "options": pxservers, "status": status, "error_message": msg, }, "rate": { "label": "Sessions created per second", "options": pxservers, "status": status, "error_message": msg, }, "req_rate": { "label": "HTTP requests per second", "options": pxservers, "status": status, "error_message": msg, }, "dreq": { "label": "Requests denied due to ACL restrictions", "options": pxservers, "status": status, "error_message": msg, }, "act": { "label": "Number of HAProxy service active servers", "options": backend_pxservers, "status": status, "error_message": msg, }, "dresp": { "label": "Responses denied due to ACL restrictions", "options": pxservers, "status": status, 
"error_message": msg, }, "wredis": { "label": "Redispatched requests", "options": pxservers, "status": status, "error_message": msg, }, "wretr": { "label": "Connection retries", "options": pxservers, "status": status, "error_message": msg, }, "bck": { "label": "Number of HAProxy service backup servers", "options": backend_pxservers, "status": status, "error_message": msg, }, "hrsp_1xx": { "label": "http responses with 1xx code", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "hrsp_2xx": { "label": "http responses with 2xx code", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "hrsp_3xx": { "label": "http responses with 3xx code", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "hrsp_4xx": { "label": "http responses with 4xx code", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "hrsp_5xx": { "label": "http responses with 5xx code", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "hrsp_other": { "label": "http responses with other codes (protocol error)", "options": pxservers, "status": status, "error_message": msg, "unit": "req/min", }, "status": { "label": "webserver status", "options": pxservers, "status": status, "error_message": msg, "unit": "boolean", }, } return metadata def check(self, textkey, pxserver, config): metric_values, output = get_status_headers(config) res = 0 isRate = False cached = 0 cache = None if ( textkey.endswith("_rate") and (textkey != "req_rate") ) or textkey.startswith("hrsp"): isRate = True if not textkey.startswith("hrsp"): textkey = textkey.split("_")[0] self.log.debug("Cache textkey {}".format(textkey)) cache = self.get_cache_results(textkey, "haproxy") if pxserver.startswith("Total"): # Calculate the total of all services svname = pxserver.split(" ")[-1].upper() res = 0 for metric in metric_values: if metric.get("svname") == svname: self.log.debug( 
"Total Calc SV {} Textkey {} PX {} Val {}".format( metric.get("svname"), textkey, metric["# pxname"], metric[textkey], ) ) try: if metric[textkey] == "": continue elif metric[textkey] == "UP" or metric[textkey] == "OPEN": res = 1 elif metric[textkey] == "DOWN": res = 0 else: res += int(metric[textkey]) except Exception: self.log.exception("Unable to parse metric value") else: # Checking a specific haproxy service metric. pxname = pxserver[: pxserver.rfind(" " + pxserver.split(" ")[-1])] svname = pxserver.split(" ")[-1] for metric in metric_values: self.log.debug("Metric value: %s " % metric[textkey]) if pxname == metric["# pxname"] and svname == metric["svname"]: res = metric[textkey] if metric[textkey] in ("UP", "OPEN"): res = 1 elif "DOWN" == metric[textkey]: res = 0 else: try: res = int(res) except: return None if isRate is True: self.cache_result(textkey, "haproxy", res) if not cache: return None delta, cached_result = cache[0] if res < cached_result: return None val = (res - cached_result) / float(delta) if textkey.startswith("hrsp"): # We want actually requests per minute. 
val = val * 60 return val return res io_stats.py000064400000032624151700142040006744 0ustar00import agent_util from sys import platform, exc_info import logging import re from agent_util import float class DevicePathManager: def __init__(self, execute_function=None): if not execute_function: self._execute_function = agent_util.execute_command else: self._execute_function = execute_function try: command = "lsblk --pairs" if "darwin" in platform: disk_util = agent_util.which("diskutil") command = "{} list".format(disk_util) self._output = self._execute_function(command) self._output = self._output[1].split("\n") except Exception: logging.exception("Error getting devices") self._output = [] def find_path(self, device): if "darwin" in platform: for line in self._output: if line.startswith("/"): mp = line.split(" ")[0] if mp[len("/dev/") :] == device: return mp return None else: for line in self._output: expression = r'^NAME="(%s)".*MOUNTPOINT="(.*)"$' % (device) match = re.match(expression, line) if match: matched_mountpoint = match.groups()[1] if matched_mountpoint == "": # Some devices don't show a mountpoint, in those # cases, return the same device. 
return device else: return matched_mountpoint class IOStatPlugin(agent_util.Plugin): textkey = "iostat" label = "IO" metrics_list = [ "rrqm/s", "wrqm/s", "r/s", "w/s", "svctm", "rkB/s", "wkB/s", "%w", "%b", "wait", "actv", "kr/s", "kw/s", "svc_t", "%util", "r_await", "w_await", ] metrics_labels = { "rrqm/s": "Read requests queued", "wrqm/s": "Write requests queued", "r/s": "Read requests", "w/s": "Write requests", "svctm": "Average I/O request service time", "%w": "% of time transactions are waiting", "%b": "Percent of time the disk is busy", "wait": "Average transactions waiting for service", "actv": "Average transactions being serviced", "kr/s": "Data read rate", "kw/s": "Data write rate", "svc_t": "Average response time", "%util": "% of I/O CPU time", "r_await": "Read request average time", "w_await": "Write request average time", } metrics_units = { "rrqm/s": "requests/second", "wrqm/s": "requests/second", "r/s": "requests/second", "w/s": "requests/second", "svctm": "ms", "%w": "percent", "%b": "percent", "wait": "transactions", "actv": "transactions", "kr/s": "KB/s", "kw/s": "KB/s", "svc_t": "Average response time", "%util": "% of I/O CPU time", "r_await": "ms", "w_await": "ms", } darwinMetricsMap = {"kbpt": "KB/t", "tps": "tps", "mbps": "MB/s"} @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None iostat_bin = agent_util.which("iostat") if not iostat_bin: self.log.info("iostat not found") status = agent_util.MISCONFIGURED msg = "Install iostat." 
return {} self.log.debug("Starting IOStat") # get our devices to monitor devices = [] data = {} output = None header = [] options_schema = {"resource": "string", "mountpoint": "string"} device_path_manager = DevicePathManager() if "hp-ux" in platform: ret, out = agent_util.execute_command("%s | sed '/^loop/d' " % iostat_bin) lines = out.strip().splitlines() for line in lines: if line.startswith("device") or line == "" or not line: continue device = line.split()[0] mountpoint = device_path_manager.find_path(device) devices.append( { "resource": device, "mountpoint": mountpoint, } ) self.log.debug("Devices found: %s" % devices) metdata = { "bps": { "label": "kilobytes per second", "options": devices, "status": status, "error_message": msg, "unit": "kb", "options_schema": options_schema, }, "sps": { "label": "disk seeks per second", "options": devices, "status": status, "error_message": msg, "unit": "", "options_schema": options_schema, }, } return metdata elif "darwin" in platform: ret, out = agent_util.execute_command("%s -d" % iostat_bin) if 0 != ret: status = agent_util.MISCONFIGURED msg = "iostat failure code {}".format(ret) else: lines = out.strip().splitlines() devs = lines[0].strip().split() for device in devs: mp = device_path_manager.find_path(device) if mp: devices.append({"resource": device, "mountpoint": mp}) else: status = agent_util.MISCONFIGURED msg = "Could not map device" break metadata = { "kbpt": { "label": "Kilobytes per transfer", "options": devices, "status": status, "error_msg": msg, "unit": "KB/s", "options_schema": options_schema, }, "tps": { "label": "Transfers per second", "options": devices, "status": status, "error_msg": msg, "unit": "transfers/s", "options_schema": options_schema, }, "mbps": { "label": "Megabytes per second", "options": devices, "status": status, "error_msg": msg, "unit": "MB/s", "options_schema": options_schema, }, } return metadata else: ret_code, output = agent_util.execute_command( "%s -dx | sed '/^loop/d' " % 
iostat_bin ) output = output.strip().splitlines() self.log.debug("#####################################################") self.log.debug("IO stats command '%s -dx' output:" % iostat_bin) self.log.debug(str(output)) self.log.debug("#####################################################") for line in reversed(output): if line.lower().startswith("device"): header = line.split() break fields = line.strip().split() dev = fields[0] if dev.lower().startswith("dm-"): continue mountpoint = device_path_manager.find_path(dev) devices.append({"resource": dev, "mountpoint": mountpoint}) self.log.debug("Devices: %s" % str(devices)) # no devices? no resources to monitor then if not devices: status = agent_util.MISCONFIGURED msg = "No devices found from iostat." for metric in header: self.log.debug("###########\nMetric: %s" % str(metric)) if metric in self.metrics_list: self.log.debug( "metric %s has the index value of %s" % (str(metric), str(header.index(metric))) ) data[str(metric)] = { "label": self.metrics_labels.get(str(metric), str(metric)), "options": devices, "options_schema": options_schema, "status": status, "error_message": msg, } if str(metric) in self.metrics_units: data[str(metric)]["unit"] = self.metrics_units.get(str(metric)) return data def check(self, textkey, device, config): metrics_index = {} iostat_bin = agent_util.which("iostat") second_line = False header_line = 2 if "freebsd" in platform: header_line = 1 if "sunos" in platform: second_line = True ret, output = agent_util.execute_command( "%s -dx 1 2 | sed '/^loop/d'" % (iostat_bin), cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT, ) header = output.strip().splitlines()[1].split() elif "hp-ux" in platform: ret, out = agent_util.execute_command( "iostat 1 2 | sed '/^loop/d'", cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT, ) lines = out.strip().splitlines() previously_seen = False for line in lines: line = line.strip() if previously_seen is False and line.startswith(device): previously_seen = True 
self.log.debug("Found first instance of disk %s" % device) elif previously_seen is True and line.startswith(device): self.log.debug("Found second instance of disk %s" % device) self.log.debug(line) l = line.split() if textkey == "bps": return float(l[1]) elif textkey == "sps": return float(l[2]) else: return None elif "darwin" in platform: try: outputKey = self.darwinMetricsMap.get(textkey, None) if outputKey is None: raise Exception("Unrecognized textkey {}".format(textkey)) ret, output = agent_util.execute_command( "%s -d -c 2" % iostat_bin, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT, ) if 0 != ret: raise Exception("iostat failure, error {}".format(ret)) metricsCount = len(self.darwinMetricsMap) lines = output.strip().split("\n") if 4 != len(lines): self.log.error("Unrecognized iostat output") self.log.error(output) raise Exception("Unrecognized iostat output") devices = lines[0].split() metrics = lines[1].split() metric_values = lines[3].split() devIndex = devices.index(device) si = devIndex * metricsCount ei = si + metricsCount deviceMetrics = metrics[si:ei] di = deviceMetrics.index(outputKey) return float(metric_values[si:ei][di]) except Exception: err = exc_info()[1] error = str(err) self.log.error( "Collection error {}, {}: {}".format(device, textkey, error) ) return None else: second_line = True cmd_to_run = "%s -dx 1 2 | sed '/^loop/d'" % (iostat_bin) ret, output = agent_util.execute_command( cmd_to_run, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) if ret != 0: self.log.error("{} failed with status {}".format(cmd_to_run, ret)) return None header = output.strip().splitlines()[header_line].split() splitted_lines = output.strip().split("\n") full = list( filter(lambda x: re.match(r"^%s .*$" % (device), x), splitted_lines) ) if full: if second_line: full = full[1] else: full = full[0] else: self.log.error("Device %s could not be found in output!" % device) return None if not header: self.log.error("Device %s no longer exists!" 
% device) return None for metric in header: self.log.debug("###########\nMetric: %s" % str(metric)) if metric in self.metrics_list: metrics_index[str(metric)] = header.index(str(metric)) self.log.debug("#####################################################") self.log.debug( "IO stats command '%s -dx %s 1 2' output:" % (iostat_bin, device) ) self.log.debug(str(full)) self.log.debug("#####################################################") self.log.debug("iostat -dx output: %s" % str(output)) j = full.strip().split() if device in j[0]: return float(j[int(metrics_index[str(textkey.split(".")[-1])])]) return 0 jboss.py000064400000033244151700142040006236 0ustar00import agent_util import logging from agent_util import float from urllib.parse import quote logger = logging.getLogger(__name__) def check_for_curl_installation(): result = agent_util.which("curl") if result != "None": return True return False def execute_query(config, query, data=None): if "username" in config: username = config["username"] + ":" else: username = "" if "password" in config and config["password"].strip(): password = config["password"].strip() + "@" else: password = "" url = config["console_url"].replace("://", "://" + username + password) # JBoss API does not handle a double-/ well url.rstrip("/") if check_for_curl_installation(): queryType = "curl --digest --silent" else: queryType = "wget -qO-" if data is not None: encoded_data = quote(data, safe="") query = query % (queryType, url, encoded_data) else: query = query % (queryType, url) ret, output = agent_util.execute_command(query) return str(output) class JBossPlugin(agent_util.Plugin): textkey = "jboss" label = "JBoss" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check for jboss configuration block if ( "username" not in config or "console_url" not in config or "password" not in config ): self.log.info("jboss is not configured") status = agent_util.MISCONFIGURED msg = "jboss is not configured 
properly" return {} # check if jboss is even installed or running - any response to server-state is enough to pass here query = '%s "%s/management?operation=attribute&name=server-state"' output = execute_query(config, query) if config.get("debug", False): self.log.debug("#####################################################") self.log.debug("Jboss command '%s' output:" % query) self.log.debug(str(output)) self.log.debug("#####################################################") if output.strip() == "": self.log.info("jboss is not running or installed") status = agent_util.UNSUPPORTED msg = "jboss not found" return {} source_options = [] query = ( '%s "%s/management/subsystem/datasources?read-resource' '&include-runtime=true&recursive&json.pretty"' ) output = execute_query(config, query) if config.get("debug", False): self.log.debug("#####################################################") self.log.debug("Jboss command '%s' output:" % query) self.log.debug(str(output)) self.log.debug("#####################################################") json_output = agent_util.json_loads(output) for children in json_output["data-source"]: if children not in source_options: source_options.append(children) if status is agent_util.SUPPORTED and not source_options: status = agent_util.MISCONFIGURED msg = "No data-source found on JBoss server." 
data = { "jdbc.prepared_statement_cache_access_count": { "label": "Prepared statement cache access", "options": source_options, "status": status, "error_message": msg, }, "jdbc.prepared_statement_cache_add_count": { "label": "Prepared statement cache add", "options": source_options, "status": status, "error_message": msg, }, "jdbc.prepared_statement_cache_current_size": { "label": "Prepared statement cache current size", "options": source_options, "status": status, "error_message": msg, }, "jdbc.prepared_statement_cache_delete_count": { "label": "Prepared statement cache delete", "options": source_options, "status": status, "error_message": msg, }, "jdbc.prepared_statement_cache_hit_count": { "label": "Prepared statement cache hit", "options": source_options, "status": status, "error_message": msg, }, "jdbc.prepared_statement_cache_miss_count": { "label": "Prepared statement cache miss", "options": source_options, "status": status, "error_message": msg, }, "pool.active_count": { "label": "Pool active count", "options": source_options, "status": status, "error_message": msg, }, "pool.available_count": { "label": "Pool available count", "options": source_options, "status": status, "error_message": msg, }, "pool.average_blocking_time": { "label": "Pool average blocking time", "options": source_options, "status": status, "error_message": msg, }, "pool.average_creation_time": { "label": "Pool average creation time", "options": source_options, "status": status, "error_message": msg, }, "pool.created_count": { "label": "Pools created", "options": source_options, "status": status, "error_message": msg, }, "pool.destroyed_count": { "label": "Pools destroyed", "options": source_options, "status": status, "error_message": msg, }, "pool.max_creation_time": { "label": "Pools max creation time", "options": source_options, "status": status, "error_message": msg, }, "pool.max_used_count": { "label": "Pools max used count", "options": source_options, "status": status, 
"error_message": msg, }, "pool.max_wait_time": { "label": "Pools max wait time", "options": source_options, "status": status, "error_message": msg, }, "pool.timed_out": { "label": "Pools timed out", "options": source_options, "status": status, "error_message": msg, }, "pool.total_blocking_time": { "label": "Pools total blocking time", "options": source_options, "status": status, "error_message": msg, }, "pool.TotalCreationTime": { "label": "Pools total creation time", "options": source_options, "status": status, "error_message": msg, }, "idle_timeout_minutes": { "label": "Time spend for idle pools to be timeout", "options": source_options, "status": status, "error_message": msg, }, "jvm.heap.used": { "label": "Total heap memory used", "options": None, "status": status, "error_message": msg, "unit": "MB", }, "jvm.heap.committed": { "label": "Total heap memory committed", "options": None, "status": status, "error_message": msg, "unit": "MB", }, "jvm.nonheap.used": { "label": "Total non-heap memory used", "options": None, "status": status, "error_message": msg, "unit": "MB", }, "jvm.nonheap.committed": { "label": "Total non-heap memory committed", "options": None, "status": status, "error_message": msg, "unit": "MB", }, "jvm.threads.live": { "label": "Total number of live threads used", "options": None, "status": status, "error_message": msg, }, "jvm.threads.daemon": { "label": "Number of daemon threads used", "options": None, "status": status, "error_message": msg, }, } return data def check(self, textkey, data, config): if textkey.split(".")[0] in ["jbc", "pool", "idle_timeout_minutes"]: query = ( '%s "%s/management/subsystem/datasources/data-source/%s/statistics' '?read-resource&include-runtime=true&recursive&json.pretty"' ) json_output = execute_query(config, query, data) data = agent_util.json_loads(json_output) if textkey == "jdbc.prepared_statement_cache_access_count": val = data["statistics"]["jdbc"]["PreparedStatementCacheAccessCount"] elif textkey == 
"jdbc.prepared_statement_cache_add_count": val = data["statistics"]["jdbc"]["PreparedStatementCacheAddCount"] elif textkey == "jdbc.prepared_statement_cache_current_size": val = data["statistics"]["jdbc"]["PreparedStatementCacheCurrentSize"] elif textkey == "jdbc.prepared_statement_cache_delete_count": val = data["statistics"]["jdbc"]["PreparedStatementCacheDeleteCount"] elif textkey == "jdbc.prepared_statement_cache_hit_count": val = data["statistics"]["jdbc"]["PreparedStatementCacheHitCount"] elif textkey == "jdbc.prepared_statement_cache_miss_count": val = data["statistics"]["jdbc"]["PreparedStatementCacheMissCount"] elif textkey == "pool.active_count": val = data["statistics"]["pool"]["ActiveCount"] elif textkey == "pool.available_count": val = data["statistics"]["pool"]["AvailableCount"] elif textkey == "pool.average_blocking_time": val = data["statistics"]["pool"]["AverageBlockingTime"] elif textkey == "pool.average_creation_time": val = data["statistics"]["pool"]["AverageCreationTime"] elif textkey == "pool.created_count": val = data["statistics"]["pool"]["CreatedCount"] elif textkey == "pool.destroyed_count": val = data["statistics"]["pool"]["DestroyedCount"] elif textkey == "pool.max_creation_time": val = data["statistics"]["pool"]["MaxCreationTime"] elif textkey == "pool.max_used_count": val = data["statistics"]["pool"]["MaxUsedCount"] elif textkey == "pool.max_wait_time": val = data["statistics"]["pool"]["MaxWaitTime"] elif textkey == "pool.timed_out": val = data["statistics"]["pool"]["TimedOut"] elif textkey == "pool.total_blocking_time": val = data["statistics"]["pool"]["TotalBlockingTime"] elif textkey == "pool.TotalCreationTime": val = data["statistics"]["pool"]["TotalCreationTime"] elif textkey == "idle_timeout_minutes": val = data["statistics"]["idle-timeout-minutes"] else: val = 0 elif textkey.split(".")[0] in ["jvm"]: query = ( '%s "%s/management/core-service/platform-mbean' '?read-attribute&recursive&json.pretty"' ) json_output = 
execute_query(config, query, data) data = agent_util.json_loads(json_output) heap_used_total = 0 heap_committed_total = 0 non_heap_used_total = 0 non_heap_committed_total = 0 if textkey.split(".")[1] in ["heap", "nonheap"]: data = data["type"]["memory-pool"] for name, value in data["name"].items(): if value["type"] == "HEAP": heap_used_total += value["usage"]["used"] heap_committed_total += value["usage"]["committed"] elif value["type"] == "NON_HEAP": non_heap_used_total += value["usage"]["used"] non_heap_committed_total += value["usage"]["committed"] elif textkey.split(".")[1] in ["threads"]: data = data["type"]["threading"] conversion = 1024**2 if textkey == "jvm.heap.used": val = heap_used_total / conversion elif textkey == "jvm.heap.committed": val = heap_committed_total / conversion elif textkey == "jvm.nonheap.used": val = non_heap_used_total / conversion elif textkey == "jvm.nonheap.committed": val = non_heap_committed_total / conversion elif textkey == "jvm.threads.live": val = data["thread-count"] elif textkey == "jvm.threads.daemon": val = data["daemon-thread-count"] else: val = 0 else: val = 0 if not val: return 0.0 return float(val) jmx.py000064400000020616151700142040005713 0ustar00import agent_util import logging import sys logger = logging.getLogger(__name__) def parse_jmx_config(config): """ Split the jmx configuration into multiple instances based on the number of comma separated instances there are. 
:param config: Dict :return: List of Dicts """ configs = {} jvm_path = None for key, value in config.items(): if key == "jvm_path": jvm_path = value elif key == "debug": continue else: for i, inner in enumerate(value.split(",")): if i not in configs.keys(): configs[i] = {} if len(value) > 0: configs[i][key] = inner.strip(" ") for key in configs: configs[key]["jvm_path"] = jvm_path if sys.version_info >= (3, 0): return list(configs.values()) else: return configs.values() class JMXPlugin(agent_util.Plugin): textkey = "jmx" label = "JMX" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # Check for jmx configuration block if not config: self.log.info("No JMX configuration found") return {} configs = parse_jmx_config(config) # Check for config setting sin jmx configuration block invalid_configs = [] missing_keys = [] for entry in configs: for key in ["port", "host", "jvm_path"]: if key not in entry.keys(): invalid_configs.append(entry) missing_keys.append(key) configs.remove(entry) if len(invalid_configs) == len(configs): msg = ( "Missing value for %s in the [jmx] block of the agent config file." % missing_keys ) self.log.info(msg) status = agent_util.MISCONFIGURED try: import jpype except: msg = "Unable to access JMX metrics due to missing jpype library." self.log.info(msg) status = agent_util.MISCONFIGURED try: if status == agent_util.SUPPORTED and not jpype.isJVMStarted(): jpype.startJVM(config["jvm_path"]) except: msg = "Unable to access JMX metrics because JVM cannot be started." 
self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: invalid_configs = [] for entry in configs: try: from jpype import java, javax jhash = java.util.HashMap() if config.get("username") and config.get("password"): jarray = jpype.JArray(java.lang.String)( [config["username"], config["password"]] ) jhash.put( javax.management.remote.JMXConnector.CREDENTIALS, jarray ) url = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi" % ( entry["host"], int(entry["port"]), ) jmxurl = javax.management.remote.JMXServiceURL(url) javax.management.remote.JMXConnectorFactory.connect(jmxurl, jhash) except: self.log.exception("Unable to connect to JMX %s" % str(entry)) invalid_configs.append(entry) if len(invalid_configs) == len(configs): msg = ( "Unable to access JMX metrics, JMX is not running or not installed. Check configs %s" % (str(invalid_configs)) ) self.log.info(msg) status = agent_util.MISCONFIGURED metadata = { "jmx.custom": { "label": "Custom JMX Metric", "options": None, "status": status, "error_message": msg, "option_string": True, } } return metadata def check(self, textkey, data, config): try: import jpype from jpype import java, javax configs = parse_jmx_config(config) jvm_path = configs[0]["jvm_path"] try: data, port = data.split("::") except ValueError: port = None # No port came in the data. if len(configs) > 1: self.log.error( "Port information missing from mBean. Unable to pick environment to execute. Aborting" ) return if port: found = False for config in configs: if config["port"] == port: found = True break if not found: # Check sent a port that doesn't match. self.log.error( "Port %s is not one of the configured ones. 
Check cannot execute" % port ) return if not jpype.isJVMStarted(): jpype.startJVM(jvm_path) jhash = java.util.HashMap() if config.get("username") and config.get("password"): jarray = jpype.JArray(java.lang.String)( [config["username"], config["password"]] ) jhash.put(javax.management.remote.JMXConnector.CREDENTIALS, jarray) url = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi" % ( config["host"], int(config["port"]), ) jmxurl = javax.management.remote.JMXServiceURL(url) jmxsoc = javax.management.remote.JMXConnectorFactory.connect(jmxurl, jhash) connection = jmxsoc.getMBeanServerConnection() """ Look for two Mbeans || separated and calculate the %. The expected input will be: MBean1||MBean2. Example for MemoryHeapUsage: java.lang:type=Memory/HeapMemoryUsage/used||java.lang:type=Memory/HeapMemoryUsage/committed """ if "||" in data: data = data.split("||") # Create a list from the two MBeans || separated. calc = [] for i in data: # Iterate the list to get each MBean value that will be set for the % calculation. 
res = self._get_bean_value(i, connection) if res is None: return None calc.append(res) return 100 * calc[0] / calc[1] return self._get_bean_value(data, connection) except: self.log.exception("Error gathering JMX metric") return def _get_bean_value(self, bean_str, connection): try: from jpype import javax last_slash = bean_str.rfind("/") if -1 == last_slash: return None obj_name = bean_str[:last_slash] java_obj = javax.management.ObjectName(obj_name) attribute = bean_str[last_slash + 1 :] # self.log.debug('Checking object name {} attr {}'.format(obj_name, attribute)) if connection.isRegistered(java_obj): res = connection.getAttribute(java_obj, attribute) return res.floatValue() last_slash = obj_name.rfind("/") if -1 == last_slash: return None key = attribute next_obj_name = obj_name[:last_slash] java_obj = javax.management.ObjectName(next_obj_name) attribute = obj_name[last_slash + 1 :] # self.log.debug('Checking object name {} attr {}'.format(next_obj_name, attribute)) if connection.isRegistered(java_obj): res = connection.getAttribute(java_obj, attribute) if hasattr(res, "contents"): res = res.contents.get(key) else: res = res.get(key) return res.floatValue() self.log.error("Could not find object name %s" % bean_str) return None except: self.log.exception("_get_bean_value %s caught an exception" % bean_str) return None linux_logs.py000064400000006534151700142040007303 0ustar00# -*- coding: utf-8 -*- import agent_util import logging from glob import glob from library.log_matcher import LogMatcher class LinuxLogsPlugin(agent_util.Plugin): textkey = "linux_log" label = "Event log" @classmethod def get_metadata(self, config): data = { "count": { "label": "Event entry count", "options": None, "status": agent_util.SUPPORTED, "error_message": None, "unit": "count", "option_string": True, } } return data def check(self, textkey, data, config={}): # Passed data from the check schedule log_source = data.get("log_source") timescale = data.get("timescale") expression = 
data.get("filter") log_source = log_source.strip(" ") if "*" in log_source or "?" in log_source: # Handle multiple files files = glob(log_source) else: files = [log_source] file_inodes = {} total_metrics = 0 expression = expression or "*" expression = expression.replace("*", ".*") expression = expression.replace('""', ".*") for target in files: # Extract the file current inode try: file_inodes[target] = LogMatcher.get_file_inode(target) except OSError: import sys _, error, _ = sys.exc_info() logging.error("Error opening %s file." % (target)) logging.error(error) continue # Extract data from the agent cache about the check log_data = self.get_cache_results( textkey, "%s/%s" % (self.schedule.id, target) ) if log_data: log_data = log_data[0][-1] else: log_data = dict() last_line_number = log_data.get("last_known_line") stored_inode = log_data.get("inode") results = log_data.get("results", []) # Extract the lines of the file. try: total_lines, current_lines = LogMatcher.get_file_lines( last_line_number, target, file_inodes[target], stored_inode ) except IOError: import sys _, e, _ = sys.exc_info() logging.error("Could not open file: %s" % str(e)) return None logging.info( "Stored line %s Current line %s Looking at %s lines" % (str(last_line_number), str(total_lines), str(len(current_lines))) ) # Perform the matching of the expression in the lines log_matcher = LogMatcher(stored_inode) results = log_matcher.match(current_lines, expression) metric, results = log_matcher.calculate_metric(results, timescale) total_metrics += metric and metric or 0 cache_data = dict( inode=file_inodes[target], last_known_line=total_lines, results=results ) self.cache_result(textkey, "%s/%s" % (self.schedule.id, target), cache_data) logging.info( 'Found %s instances of "%s" in %s' % (str(metric or 0), expression, target) ) return total_metrics lm_sensors.py000064400000005050151700142040007274 0ustar00import agent_util import sys import os import platform from agent_util import float def 
build_sensor_dict(): cmd = """sensors -u""" ret, out = agent_util.execute_command(cmd) lines = out.splitlines() current_sensor_package = "" current_sensor = "" sensors_dict = {} for l in lines: l = l.lower() if not l or l == "" or "adapter" in l: continue if ":" in l: if not l or l == "" or "crit" in l or "max" in l: continue line = l.strip().split(":") if not line[1]: current_sensor = line[0].replace(" ", "_") else: sens_type = "temperature" if "fan" in current_sensor: sens_type = "fan_speed" textkey = "%s.%s.%s" % (current_sensor_package, current_sensor, line[0]) if sens_type not in sensors_dict: sensors_dict[sens_type] = {} sensors_dict[sens_type][textkey] = float(line[1]) else: current_sensor_package = l return sensors_dict class LMSensorsPlugin(agent_util.Plugin): textkey = "lm_sensors" label = "Hardware Sensors" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if not agent_util.which("sensors", exc=False): self.log.info("lm_sensors binary not found") status = agent_util.UNSUPPORTED msg = "lm_sensors binary not found" return {} sensors = build_sensor_dict() self.log.debug("Found sensor data:\n%s" % sensors) data = {} if "temperature" in sensors.keys(): temp_options = sorted(sensors["temperature"].keys()) data["temperature"] = { "label": "Sensor temperature", "options": temp_options, "status": status, "error_message": msg, "unit": "Celsius", } if "fan_speed" in sensors.keys(): fan_options = sorted(sensors["fan_speed"].keys()) data["fan_speed"] = { "label": "Fan speed", "options": fan_options, "status": status, "error_message": msg, "unit": "RPM", } return data def check(self, textkey, option, config): sensors = build_sensor_dict() value = sensors.get(textkey, {}).get(option) if value == None: return None return float(value) logstash_forwarder.py000064400000003213151700142040011006 0ustar00import agent_util import sys import os from datetime import datetime, timedelta class LogstashForwarderPlugin(agent_util.Plugin): textkey 
= "logstash_forwarder" label = "Logstash Forwarder" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None if not os.path.isdir("/var/log/logstash-forwarder"): self.log.info("logstash-forwarder not found") status = agent_util.UNSUPPORTED msg = "logstash-forwarder log folder not found!" return {} data = { "logs_per_minute": { "label": "Logs forwarded per minute", "options": None, "status": status, "error_message": msg, "unit": None, } } return data def check(self, textkey, option, config): cmd = "tail -25 /var/log/logstash-forwarder/logstash-forwarder.err" retcode, output = agent_util.execute_command(cmd) logs_per_min = 0 last_minute = output.strip().split("\n") for item in last_minute: if "processing" in item: line = item.split() time = " ".join(line[0:2]) parts = time.split(".") time = datetime.strptime(parts[0], "%Y/%m/%d %H:%M:%S") time = time.replace(microsecond=int(parts[1])) if (datetime.now() - timedelta(seconds=60)) > time: continue else: logs_per_min = logs_per_min + int(line[-2]) else: continue return logs_per_min memcache.py000064400000014046151700142040006657 0ustar00import agent_util import socket def get_stats(host, port): size = 4096 # 1024 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, int(port))) s.send("stats \n") data = s.recv(size) data = data.replace("STAT ", "").replace("END", "") stats = dict(metric.split(" ", 1) for metric in data.splitlines() if metric != "") s.close() return stats class MemcachePlugin(agent_util.Plugin): textkey = "memcache" label = "Memcache" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None # check if memcached is even installed installed = agent_util.which("memcached") if not installed: self.log.info("memcached binary not found") status = agent_util.UNSUPPORTED return {} if not config: msg = "The [memcache] config block is not found in the agent config file." 
self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and ( not "hostname" in config or not "port" in config ): msg = "hostname or port missing from the [memcache] block in the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: try: stats = get_stats(config["hostname"], config["port"]) except: status = agent_util.MISCONFIGURED msg = "Unable to get memcache status information, please double check hostname and port on config file." self.log.error(msg) metadata = { "bytes": { "label": "Number of bytes", "options": None, "status": status, "error_message": msg, "unit": "B", }, "bytes_read": { "label": "Total number of bytes read", "options": None, "status": status, "error_message": msg, "unit": "B", }, "bytes_written": { "label": "Total number of bytes sent", "options": None, "status": status, "error_message": msg, "unit": "B", }, "cmd_get": { "label": "Total number of retrieval requests (get operations)", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "cmd_set": { "label": "Total number of storage requests (set operations)", "options": None, "status": status, "error_message": msg, "unit": "requests", }, "connection_structures": { "label": "Number of connection structures", "options": None, "status": status, "error_message": msg, "unit": "structures", }, "curr_connections": { "label": "Current number of open connections", "options": None, "status": status, "error_message": msg, "unit": "connections", }, "curr_items": { "label": "Current number of items", "options": None, "status": status, "error_message": msg, "unit": "items", }, "evictions": { "label": "Number of valid items removed from cache to free memory for new items", "options": None, "status": status, "error_message": msg, "unit": "items", }, "get_hits": { "label": "Number of keys that have been requested and found present", "options": None, "status": status, "error_message": msg, "unit": 
"keys", }, "get_misses": { "label": "Number of items that have been requested and not found", "options": None, "status": status, "error_message": msg, "unit": "items", }, "pointer_size": { "label": "Size of pointers", "options": None, "status": status, "error_message": msg, "unit": "bits", }, "rusage_system": { "label": "Total system time", "options": None, "status": status, "error_message": msg, "unit": "seconds:microseconds", }, "rusage_user": { "label": "Total user time", "options": None, "status": status, "error_message": msg, "unit": "seconds:microseconds", }, "threads": { "label": "Number of worker threads requested", "options": None, "status": status, "error_message": msg, "unit": "threads", }, "total_connections": { "label": "Total number of connections opened", "options": None, "status": status, "error_message": msg, "unit": "connections", }, "total_items": { "label": "Total number of items stored", "options": None, "status": status, "error_message": msg, "unit": "items", }, } return metadata def check(self, textkey, data, config): stats = get_stats(config["hostname"], config["port"]) res = 0 try: res = int(stats[textkey]) except: res = 0 return res memory_usage.py000064400000065102151700142040007610 0ustar00import agent_util import os import re import sys import socket from agent_util import float def search_esxtop(headers, search_string): for idx, column in enumerate(headers): if search_string in column: return idx return None class MemoryUsagePlugin(agent_util.Plugin): textkey = "memory" label = "Memory" # adding minimum RAM and swap usage min_ram = 0 min_swap = 0 @staticmethod def _parse_sysctl(stdout): lines = stdout.split("\n") sysctl = {} for line in lines: m = re.search(r"^(.+?)\s*:\s*(.+)$", line) if not m: continue k = str(m.group(1)) v = m.group(2) if v.isdigit(): v = int(v) sysctl[k] = v return sysctl @classmethod def get_metadata(self, config): if "freebsd" in sys.platform: sysctl = agent_util.which("sysctl") status = agent_util.SUPPORTED msg 
= None if not sysctl: self.log.info("sysctl binary not found") status = agent_util.UNSUPPORTED msg = "sysctl binary not found" return {} if status is agent_util.SUPPORTED: ret, output = agent_util.execute_command(sysctl + " -a") if config.get("debug", False): self.log.debug( "#####################################################" ) self.log.debug("Memory Usage command 'sysctl -a' output:") self.log.debug(str(output)) self.log.debug( "#####################################################" ) if ret != 0 or not output: status = agent_util.UNSUPPORTED msg = "error executing 'sysctl -a'" if status is agent_util.SUPPORTED: d = self._parse_sysctl(output) required_keys = ( "hw.pagesize", "hw.physmem", "vfs.bufspace", "vm.stats.vm.v_inactive_count", "vm.stats.vm.v_active_count", "vm.stats.vm.v_cache_count", "vm.stats.vm.v_free_count", ) # here we're making sure the set of all the keys we need # are in the set of all keys (subset) for key in required_keys: if key not in d.keys(): status = agent_util.UNSUPPORTED msg = "could not find all the required sysctl keys" break data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, "error_message": msg, "unit": "percent", }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_buffer": { "label": "RAM buffer (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_active": { "label": "RAM active (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_inactive": { "label": "RAM inactive (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_cached": { "label": "RAM cached (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, } return data elif "darwin" in sys.platform: status = agent_util.SUPPORTED msg = "" data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, 
"error_message": msg, "unit": "percent", }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_active": { "label": "RAM active (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_inactive": { "label": "RAM inactive (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, "ram.kb_wired": { "label": "RAM wired (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", }, } return data elif "aix" in sys.platform: status = agent_util.SUPPORTED msg = None # Figure out how much memory we have retcode, output = agent_util.execute_command("svmon -O,unit=KB") output = output.split("\n") max_ram = None max_swap = None for line in output: if line.startswith("memory"): parts = line.split() # max ram in kb max_ram = int(parts[1]) if line.startswith("pg space"): parts = line.split() max_swap = int(parts[2]) data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_ram, }, "swap.percent": { "label": "Swap percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "swap.kb_used": { "label": "Swap used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_swap, }, } return data elif "sunos" in sys.platform: status = agent_util.SUPPORTED msg = None # Figure out how much memory we have max_ram = None retcode, output = agent_util.execute_command("/usr/sbin/prtconf") output = output.split("\n") for line in output: if "Memory" not in line: continue fields = line.split() max_ram = int(fields[2]) * 1024 data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, 
"error_message": msg, "min_value": 0, "max_value": 100, }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_ram, }, } return data elif "vmware" in sys.platform: # Default Linux logic status = agent_util.SUPPORTED msg = None ret, mem_str = agent_util.execute_command( "esxcli hardware memory get | grep 'Physical Memory'" ) if ret == 0: ram_total = (float(mem_str.split(":")[1].split()[0]) / 1024) / 1024 else: status = agent_util.MISCONFIGURED msg = "ERROR RUNNING ESXCLI: %s" % mem_str data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "ram.mb_used": { "label": "RAM used (MB)", "options": None, "status": status, "error_message": msg, "unit": "mB", "min_value": 0, "max_value": ram_total, }, } return data elif "hp-ux" in sys.platform: status = agent_util.SUPPORTED msg = None ret, output = agent_util.execute_command("swapinfo -ta") max_ram = None max_swap = None for line in output: if line.startswith("memory"): parts = line.split() # max ram in kb max_ram = int(parts[1]) if line.startswith("dev"): parts = line.split() max_swap = int(parts[1]) data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_ram, }, "swap.percent": { "label": "Swap percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "swap.kb_used": { "label": "Swap used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": max_swap, }, } return data else: # Default Linux logic status = agent_util.SUPPORTED msg = None if not os.path.exists("/proc/meminfo"): status = 
agent_util.MISCONFIGURED msg = "Enable procfs." # Get memory info to find max values lines = open("/proc/meminfo", "r").readlines() usage_info = {} for line in lines: line = line.strip() m = re.match(r"^(?P<key>.+?):\s+(?P<value>\d+)", line) usage_info[m.group("key")] = int(m.group("value")) ram_total = usage_info.get("MemTotal", 0) swap_total = usage_info.get("SwapTotal", 0) data = { "ram.percent": { "label": "RAM percent usage", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": 100, }, "ram.kb_used": { "label": "RAM used (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, "ram.kb_buffer": { "label": "RAM buffer (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, "swap.percent": { "label": "Swap percent usage", "options": None, "status": status, "error_message": msg, "unit": "percent", "min_value": 0, "max_value": 100, }, "swap.kb_used": { "label": "Swap used (Kb)", "options": None, "status": status, "error_message": msg, "min_value": 0, "max_value": swap_total, }, "ram.kb_active": { "label": "RAM active (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, "ram.kb_inactive": { "label": "RAM inactive (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, "ram.kb_cached": { "label": "RAM cached (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, "swap.kb_cached": { "label": "Swap cached (kB)", "options": None, "status": status, "error_message": msg, "unit": "kB", "min_value": 0, "max_value": ram_total, }, } return data def check(self, textkey, data, config): sysctl = agent_util.which("sysctl") if "freebsd" in sys.platform: mem_info = {} ret, output = agent_util.execute_command(sysctl + " -a") d = 
self._parse_sysctl(output) buffsize = d["vfs.bufspace"] pagesize = d["hw.pagesize"] total = d["hw.physmem"] inactive = d["vm.stats.vm.v_inactive_count"] * pagesize active = d["vm.stats.vm.v_active_count"] * pagesize cache = d["vm.stats.vm.v_cache_count"] * pagesize free = d["vm.stats.vm.v_free_count"] * pagesize avail = inactive + cache + free used = total - avail self.log.debug("buffsize %s" % str(buffsize)) self.log.debug("pagesize %s" % str(pagesize)) self.log.debug("total %s" % str(total)) self.log.debug("inactive %s" % str(inactive)) self.log.debug("active %s" % str(active)) self.log.debug("cache %s" % str(cache)) self.log.debug("free %s" % str(free)) self.log.debug("avail %s" % str(avail)) self.log.debug("used %s" % str(used)) mem_info["ram.kb_used"] = used / 1024.0 if total > 0: mem_info["ram.percent"] = (float(used) / total) * 100.0 else: mem_info["ram.percent"] = 0.0 mem_info["ram.kb_active"] = active / 1024.0 mem_info["ram.kb_inactive"] = inactive / 1024.0 mem_info["ram.kb_cached"] = cache / 1024.0 mem_info["ram.kb_buffer"] = buffsize / 1024.0 # total installed RAM in KB ram_max = int(total) / 1024 return mem_info[textkey] elif "darwin" in sys.platform: vmstat = agent_util.which("vm_stat") ret, output = agent_util.execute_command(vmstat) pageSize = None pageToken = "Mach Virtual Memory Statistics:" vm_data = {} for line in output.split("\n"): if line.startswith(pageToken): try: pageSize = int(line[len(pageToken) :].split()[-2]) self.log.debug("Memory page size -> {}".format(pageSize)) except Exception as e: pass elif ":" in line: try: key, val = line.split(":", 1) val = float(val.strip()) vm_data[key] = val except: pass if pageSize is None: self.log.error("Could not compute page size") return None kbMultiplier = float(pageSize / 1024) wired = vm_data.get("Pages wired down", 0) * kbMultiplier active = vm_data.get("Pages active", 0) * kbMultiplier inactive = vm_data.get("Pages inactive", 0) * kbMultiplier free = vm_data.get("Pages free", 0) * kbMultiplier 
if textkey == "ram.kb_used": return wired + active elif textkey == "ram.kb_active": return active elif textkey == "ram.kb_inactive": return inactive elif textkey == "ram.kb_wired": return wired elif textkey == "ram.percent": return 100.0 * (wired + active) / (wired + active + inactive + free) return None elif "aix" in sys.platform: retcode, output = agent_util.execute_command("svmon -O,unit=KB") output = output.split("\n") self.log.debug("svmon output: %s" % output) for line in output: if line.startswith("memory"): parts = line.split() # max ram in kb max_ram = int(parts[1]) if textkey == "ram.percent": return 100.0 * int(parts[5]) / int(parts[1]) elif textkey == "ram.kb_used": # svmon -G returns counts in 4096 byte pages return int(parts[2]) if line.startswith("pg space"): parts = line.split() if textkey == "swap.percent": return 100.0 * int(parts[3]) / int(parts[2]) elif textkey == "swap.kb_used": # svmon -G returns counts in 4096 byte pages return int(parts[3]) # Unknown AIX textkey return None elif "sunos" in sys.platform: retcode, output = agent_util.execute_command( "prstat -s rss 1 1 | awk '{print $4}'" ) output = output.split("\n") kb_used = 0 for line in output: if "RSS" in line or not line: continue if "G" in line: used = line.strip("M") kb_used += (int(used) * 1024) * 1024 elif "M" in line: used = line.strip("M") kb_used += int(used) * 1024 elif "K" in line: used = line.strip("K") kb_used += int(used) retcode, output = agent_util.execute_command("/usr/sbin/prtconf") output = output.split("\n") for line in output: if "Memory" not in line: continue fields = line.split() kb_total = int(fields[2]) * 1024 # setting this to its own var in case we do something funky with kb_total or reset it somehwere max_ram = kb_total kb_free = kb_total - kb_used if textkey == "ram.percent": return 100.0 * (1.0 - float(kb_free) / float(kb_total)) elif textkey == "ram.kb_used": return kb_used # Unknown Solaris textkey return None elif "vmware" in sys.platform: hostname = 
socket.gethostname() ret, mem_str = agent_util.execute_command( "esxcli hardware memory get | grep 'Physical Memory'", cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT, ) if ret == 0: mb_total = (int(mem_str.split(":")[1].split()[0]) / 1024) / 1024 self.log.debug("Found %s MB of RAM installed" % mb_total) ret, out = agent_util.execute_command( "esxtop -b -n 2 -d 2", cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT ) out_list = out.split("\n") headers = out_list[0].replace('"', "").split(",") data = [] for idx, val in enumerate(out_list[::1]): if not val or val == "": continue data = out_list[idx].replace('"', "").split(",") mb_free_search = r"\\\\%s\Memory\Free MBytes" % hostname self.log.debug("Searching VMware for %s" % mb_free_search) mb_free_idx = search_esxtop(headers, mb_free_search) if mb_free_idx: self.log.debug("VMware free memory index %s" % mb_free_idx) mb_free = float(data[mb_free_idx]) else: self.log.error("Unable to find RAM info from esxcli output") return None if textkey == "ram.mb_used": return mb_total - mb_free if textkey == "ram.percent": return ((mb_total - mb_free) / mb_total) * 100.0 if "hp-ux" in sys.platform: ret, out = agent_util.execute_command("swapinfo -ta") usage_info = {} for line in out.splitlines(): if line.startswith("dev"): l = line.split() self.log.debug("Swap: %s" % l) usage_info["swap.kb_used"] = float(l[2]) if l[2] == 0 or l[2] == "-": usage_info["swap.percent"] = 0 else: usage_info["swap.percent"] = (float(l[2]) / float(l[1])) * 100.0 if line.startswith("memory"): l = line.split() self.log.debug("RAM: %s" % l) usage_info["ram.kb_used"] = float(l[2]) usage_info["ram.percent"] = (float(l[2]) / float(l[1])) * 100.0 return usage_info.get(textkey, None) else: # Default Linux logic lines = open("/proc/meminfo", "r").readlines() usage_info = {} mem_info = {} for line in lines: line = line.strip() m = re.match(r"^(?P<key>.+?):\s+(?P<value>\d+)", line) usage_info[m.group("key")] = int(m.group("value")) ram_buffer = 
usage_info.get("Buffers", 0) ram_cached = usage_info.get("Cached", 0) ram_active = usage_info.get("Active", 0) ram_available = usage_info.get("MemAvailable", 0) ram_inactive = usage_info.get("Inactive", 0) ram_total = usage_info.get("MemTotal", 0) if config.get("percent_usage_override"): ram_free = usage_info.get("MemFree", 0) elif "MemAvailable" in usage_info: ram_free = ram_available else: ram_free = usage_info.get("MemFree", 0) + ram_buffer + ram_cached ram_used = ram_total - ram_free # setting max_ram to its own var in case we do something funky with ram_total later max_ram = ram_total mem_info["ram.kb_buffer"] = ram_buffer mem_info["ram.kb_used"] = ram_used if ram_total > 0: mem_info["ram.percent"] = (float(ram_used) / ram_total) * 100.0 else: mem_info["ram.percent"] = 0.0 mem_info["ram.kb_active"] = ram_active mem_info["ram.kb_inactive"] = ram_inactive mem_info["ram.kb_cached"] = ram_cached swap_cached = usage_info.get("SwapCached", 0) swap_total = usage_info.get("SwapTotal", 0) swap_free = usage_info.get("SwapFree", 0) swap_used = swap_total - swap_free # including max amount of swap as well b/c we calc a percentage on that as well max_swap = swap_total mem_info["swap.kb_used"] = swap_used if swap_total > 0: mem_info["swap.percent"] = (float(swap_used) / swap_total) * 100.0 else: mem_info["swap.percent"] = 0.0 mem_info["swap.kb_cached"] = swap_cached self.log.debug("ram_total %s" % str(ram_total)) self.log.debug("ram_free %s" % str(ram_free)) self.log.debug("ram_active %s" % str(ram_active)) self.log.debug("ram_inactive %s" % str(ram_inactive)) self.log.debug("ram_cached %s" % str(ram_cached)) self.log.debug("swap_total %s" % str(swap_total)) self.log.debug("swap_free %s" % str(swap_free)) self.log.debug("swap_used %s" % str(swap_used)) self.log.debug("swap_cached %s" % str(swap_cached)) return mem_info[textkey] mongo.py000064400000041154151700142040006234 0ustar00from datetime import datetime import agent_util import re import datetime from agent_util 
import float import logging def custom_json_converter(d): if "__datetime__" in d: dt = d["__datetime__"] # handling for the two different type of datetime formats mongo uses try: dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%SZ") return dt except: dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%fZ") return dt return d def execute_query(query, config): mongo_bin = agent_util.which("mongo", exc=True) hostname = config.get("hostname", "localhost").strip() port = int(config.get("port", "27017")) user = config.get("username", "").strip() password = config.get("password", "").strip() database = config.get("database", "").strip() client_cert = config.get("client_cert", "").strip() ca_cert = config.get("ca_cert", "").strip() authenticationdb = config.get("authenticationdb", "").strip() mongo_connection = "%s " % mongo_bin if user: mongo_connection += "-u %s " % user if password: mongo_connection += "-p %s" % password.strip() if client_cert and ca_cert: mongo_connection += ( "--ssl --sslPEMKeyFile %s --sslCAFile %s --authenticationDatabase '$external' --authenticationMechanism MONGODB-X509" % (client_cert, ca_cert) ) if authenticationdb: mongo_connection += " --authenticationDatabase %s" % authenticationdb.strip() mongo_connection += " %s:%s/%s" % (hostname, port, database) query = "printjson(%s)" % query cmd = "%s --quiet --eval %r" % (mongo_connection, query) status, raw_output = agent_util.execute_command(cmd) # strip out some of the special mongo objects from the JSON so we can parse it result = raw_output.strip() result = re.sub( r"Timestamp\((.*?), (.*?)\)", r'{"__timestamp__": {"t": \1, "i": \2}}', result ) result = re.sub(r"ISODate\((.*?)\)", r'{"__datetime__": \1}', result) result = re.sub(r'NumberLong\("?-?([0-9]+)"?\)', r"\1", result) result = re.sub(r"ObjectId\((.*?)\)", r"\1", result) result = re.sub( r"BinData\((.*?),(.*?)\)", r'{"__bindata__:": {"a": \1, "b": \2}}', result ) # conver the cleaned JSON to a dict try: result = 
agent_util.json_loads(result, object_hook=custom_json_converter) except: result = {} return (status, result, raw_output) class MongoPlugin(agent_util.Plugin): textkey = "mongo" label = "Mongo" @classmethod def get_metadata(self, config): # used for general, single node instances status = agent_util.SUPPORTED msg = None # if mongo server is detected in replica set, extended options will become available replica_set_status = agent_util.SUPPORTED replica_set_msg = None mongo = agent_util.which("mongo") if not mongo: self.log.info("mongo binary not found") status = agent_util.UNSUPPORTED msg = "mongo binary not found" return {} if status is agent_util.SUPPORTED: # we're testing the general connection with this and if it's in a replica set cmd_status, result, raw_output = execute_query("rs.status()", config) self.log.debug("Mongo result: %s" % result) # connection failed if cmd_status != 0: self.log.exception( "error connecting to mongo server. raw output: %s" % raw_output ) status = agent_util.MISCONFIGURED msg = "Check your Mongo connection settings in the agent config file." replica_set_status = agent_util.UNSUPPORTED replica_set_msg = ( "Check your Mongo connection settings in the agent config file." 
) self.log.warning(replica_set_msg) # connection succeeded, now check for replica set prescense else: if not int(result.get("ok", 0)): replica_set_status = agent_util.UNSUPPORTED replica_set_msg = "Mongo server is not part of a replica set" self.log.warning(replica_set_msg) self.log.error(raw_output) self.log.error("Mongo keys: %s" % result.keys()) data = { "reads": { "label": "Reads", "options": None, "status": status, "error_message": msg, "unit": "reads/sec", }, "writes": { "label": "Writes", "options": None, "status": status, "error_message": msg, "unit": "writes/sec", }, "objects": { "label": "Objects", "options": None, "status": status, "error_message": msg, "unit": "objects", }, "dataSize": { "label": "Size of documents", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "indexSize": { "label": "Size of indexes", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "extra_info.page_faults": { "label": "Page Faults", "options": None, "status": status, "error_message": msg, "unit": "faults/sec", }, "storageSize": { "label": "Size of extents", "options": None, "status": status, "error_message": msg, "unit": "bytes", }, "connections.current": { "label": "Current connections", "options": None, "status": status, "error_message": msg, "unit": "connections", }, "connections.available": { "label": "Available connections", "options": None, "status": status, "error_message": msg, "unit": "connections", }, "cursors.total_open": { "label": "Open cursors", "options": None, "status": status, "error_message": msg, "unit": "cursors", }, "cursors.timed_out": { "label": "Timed out cursors", "options": None, "status": status, "error_message": msg, "unit": "cursors", }, "globalLock.currentQueue.total": { "label": "Total number of operations queued waiting for lock", "options": None, "status": status, "error_message": msg, "unit": "operations", }, "globalLock.currentQueue.readers": { "label": "Number of operations queued waiting 
for read lock", "options": None, "status": status, "error_message": msg, "unit": "operations", }, "globalLock.currentQueue.writers": { "label": "Number of operations queued waiting for write lock", "options": None, "status": status, "error_message": msg, "unit": "operations", }, "replica_set.is_primary": { "label": "Is replica set PRIMARY member", "options": None, "status": replica_set_status, "error_message": replica_set_msg, "unit": "boolean", }, "replica_set.state": { "label": "MongoDB replica set member state", "options": None, "status": replica_set_status, "error_message": replica_set_msg, "unit": "state", }, "replica_set.health": { "label": "MongoDB replica set member health", "options": None, "status": replica_set_status, "error_message": replica_set_msg, "unit": "health", }, "replica_set.primary_optime_date_difference": { "label": "Difference of primary node's optime date and realtime", "options": None, "status": replica_set_status, "error_message": replica_set_msg, "unit": "seconds", }, "replica_set.max_members_ping_ms_difference": { "label": "Maximum difference of members' ping ms from primary node's ping ms", "options": None, "status": replica_set_status, "error_message": replica_set_msg, "unit": "ms", }, } return data @classmethod def get_metadata_docker(self, container, config): if "hostname" not in config: try: ip = agent_util.get_container_ip(container) config["hostname"] = ip except Exception: self.log.exception("Docker metadata error") config["from_docker"] = True return self.get_metadata(config) def get_mongo_cache(self, textkey, curr_reqs): cache = self.get_cache_results("mongo:%s" % textkey, None) if not cache: self.log.info("Empty mongo cache! 
Building for the first time.") self.cache_result("mongo:%s" % textkey, None, curr_reqs, replace=True) return None, None delta, previous_reqs = cache[0] self.cache_result("mongo:%s" % textkey, None, curr_reqs, replace=True) return delta, float(previous_reqs) def check(self, textkey, data, config): rs_status, rs_result, rs_raw_output = execute_query("rs.status()", config) ss_status, ss_result, ss_raw_output = execute_query("db.serverStatus()", config) db_stats, db_result, db_raw_output = execute_query("db.stats()", config) self.log.debug("Debug Output from Mongo Plugin:") self.log.debug(rs_result) self.log.debug(ss_raw_output) if textkey == "reads": queries = ss_result.get("opcounters", {}).get("query", None) get_mores = ss_result.get("opcounters", {}).get("getmore", None) if queries is None or get_mores is None: return None queries = float(queries) get_mores = float(get_mores) curr_data = queries + get_mores delta, previous_data = self.get_mongo_cache(textkey, curr_data) if previous_data is None or curr_data < previous_data: return None return (curr_data - previous_data) / float(delta) if textkey == "writes": inserts = ss_result.get("opcounters", {}).get("insert", None) updates = ss_result.get("opcounters", {}).get("update", None) deletes = ss_result.get("opcounters", {}).get("delete", None) if inserts is None or updates is None or deletes is None: return None inserts = float(inserts) updates = float(updates) deletes = float(deletes) curr_data = inserts + updates + deletes delta, previous_data = self.get_mongo_cache(textkey, curr_data) if previous_data is None or curr_data < previous_data: return None return (curr_data - previous_data) / float(delta) if textkey == "extra_info.page_faults": curr_data = ss_result.get("extra_info", {}).get("page_faults", None) if curr_data is None: return None curr_data = float(curr_data) delta, previous_data = self.get_mongo_cache(textkey, curr_data) if previous_data is None or curr_data < previous_data: return None return (curr_data 
- previous_data) / float(delta) if textkey == "connections.current": return ss_result.get("connections", {}).get("current", None) if textkey == "dataSize": return db_result.get("dataSize", None) if textkey == "storageSize": return db_result.get("storageSize", None) if textkey == "indexSize": return db_result.get("indexSize", None) if textkey == "objects": return db_result.get("objects", None) elif textkey == "connections.available": return ss_result.get("connections", {}).get("available", None) elif textkey == "cursors.total_open": return ss_result.get("cursors", {}).get("totalOpen", None) elif textkey == "cursors.timed_out": return ss_result.get("cursors", {}).get("timedOut", None) elif textkey == "globalLock.currentQueue.total": return ( ss_result.get("globalLock", {}) .get("currentQueue", {}) .get("total", None) ) elif textkey == "globalLock.currentQueue.readers": return ( ss_result.get("globalLock", {}) .get("currentQueue", {}) .get("readers", None) ) elif textkey == "globalLock.currentQueue.writers": return ( ss_result.get("globalLock", {}) .get("currentQueue", {}) .get("writers", None) ) elif textkey == "replica_set.is_primary": is_primary = 0 for member in rs_result.get("members", []): if ( member.get("self") in (True, "true") and member.get("stateStr") == "PRIMARY" ): is_primary = 1 return is_primary elif textkey == "replica_set.state": return rs_result["myState"] elif textkey == "replica_set.health": health = None for member in rs_result.get("members", []): if member.get("self") in (True, "true"): health = member.get("health", None) return health elif "all_members_healthy" in textkey: for member in rs_result["members"]: try: if member["health"] != 1.0: return False except: continue return True else: try: primaryNodeStatus = filter( lambda node: node["stateStr"] == "PRIMARY", rs_result["members"] )[0] except: return None if "primary_optime_date_difference" in textkey: try: td = datetime.datetime.now() - primaryNodeStatus["optimeDate"] return_val = ( 
td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6 ) / float(10**6) self.log.debug( "primary_optime_date_difference: %s" % str(return_val) ) return return_val except: return 999999 elif "max_members_ping_ms_difference" in textkey: if "pingMs" not in primaryNodeStatus: return 0 max_ping_ms_difference = -1 for member in status["members"]: if member["stateStr"] != "PRIMARY": try: max_ping_ms_difference = max( max_ping_ms_difference, abs(primaryNodeStatus["pingMs"] - member["pingMs"]), ) except: continue if max_ping_ms_difference == -1: return 999999 self.log.debug("max_ping_ms_difference: %s" % max_ping_ms_difference) return max_ping_ms_difference def check_docker(self, container, textkey, data, config): if "hostname" not in config: try: ip = agent_util.get_container_ip(container) config["hostname"] = ip except Exception as e: self.log.exception(e) config["from_docker"] = True return self.check(textkey, data, config) mysql.py000064400000050633151700142040006264 0ustar00import agent_util import csv import sys if sys.version[0] == "3": from io import StringIO else: from StringIO import StringIO from agent_util import float def execute_query(config, query): cmd = agent_util.which("mysql", exc=True) if "host" in config: cmd += " -h %s" % config["host"] if "port" in config: cmd += " -P %s" % config["port"] if "username" in config: cmd += " -u %s" % config["username"] if "password" in config and config["password"].strip(): cmd += " --password='%s'" % config["password"].strip() cmd += " -Be %r" % str(query) if "database" in config: cmd += " %s" % config["database"].strip() status, output = agent_util.execute_command(cmd) if status != 0: raise Exception(output) output = StringIO(output) parsed_output = list(csv.reader(output, delimiter="\t")) matcher = "mysql: [Warning] Using a password on the command line interface can be insecure." 
for i in parsed_output: if matcher in parsed_output[0]: del parsed_output[0] return parsed_output def parse_instance_configs(config): """ Parse the configuration info for multiple instances from the MySQL config block that gets passed in from the agent. """ instance_configs = {} extended_metrics = None for key, values in config.items(): if key == "extended_metrics": extended_metrics = values else: for i, value in enumerate(str(values).split(",")): if i not in instance_configs: instance_configs[i] = {} if len(value) > 0: instance_configs[i][key] = value # We assume that the extended metrics option # applies to all instances, so if it was found in the # config, apply it to all instances if extended_metrics is not None: for i, config in instance_configs.items(): config["extended_metrics"] = extended_metrics return instance_configs.values() def resource_name(instance_config): """ Returns the resource name for a given instance_config """ host = instance_config.get("host", "127.0.0.1") port = instance_config.get("port", "3306") db = instance_config.get("database", None) resource = "%s:%s" % (host, port) if db: resource += "-%s" % db return resource def get_instance_config(resource, all_configs): """ Search for an instance config in 'all_configs' that has a resource name equal to 'resource' """ for config in all_configs: if resource_name(config) == resource: return config return all_configs[0] def metadata_options(instance_configs): """ Given a set of instance configs, generate the 'option' field for the agent metadata payload. 
""" options = [] for config in instance_configs: d = { "host": config.get("host", "127.0.0.1"), "port": config.get("port", "3306"), "resource": resource_name(config), } if "database" in config and config["database"]: d["database"] = config["database"] options.append(d) return options READ_COMMANDS = { "com_selects_per_second": "Com_select", "com_writes_per_second": "Com_insert", "com_updates_per_second": "Com_update", "com_deletes_per_second": "Com_delete", } class MySQLPlugin(agent_util.Plugin): textkey = "mysql" label = "MySQL" @classmethod def get_metadata(self, config): status = agent_util.SUPPORTED msg = None instance_configs = parse_instance_configs(config) options = metadata_options(instance_configs) # check if mysql is even installed client_installed = agent_util.which("mysql") if not client_installed: msg = "MySQL client was not found. Please install the client or add it to the default path." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED and not config: msg = "The [mysql] config block was not found in the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: for cfg in instance_configs: if not "username" in cfg or not "password" in cfg: msg = "The username and password entries were not found in the [mysql] block of the agent config file." self.log.info(msg) status = agent_util.MISCONFIGURED if status == agent_util.SUPPORTED: for cfg in instance_configs: try: output = execute_query(cfg, "SHOW DATABASES") if cfg.get("debug", False): self.log.debug( "#####################################################" ) self.log.debug("Mysql command 'SHOW DATABASES' output:") self.log.debug(str(output)) self.log.debug( "#####################################################" ) except: self.log.exception("error running mysql query") status = agent_util.MISCONFIGURED msg = "Unable to authenticate with MySQL, please double-check the credentials in the agent config file." 
data = { # basic "queries_per_second": { "label": "Average queries per second", "options": options, "status": status, "error_message": msg, "unit": "queries/s", }, "slow_queries_per_minute": { "label": "Average number of slow queries per minute", "options": options, "status": status, "error_message": msg, "unit": "queries/minute", }, "query_cache.percent_free": { "label": "MySQL query cache percent free", "options": options, "status": status, "error_message": msg, "unit": "percent", }, "query_cache.kb_free": { "label": "MySQL query cache amount free (kB)", "options": options, "status": status, "error_message": msg, "unit": "kB", }, "connections": { "label": "MySQL connections", "options": options, "status": status, "error_message": msg, "unit": "connections", }, "com_selects_per_second": { "label": "SELECT rate", "options": options, "status": status, "error_message": msg, "unit": "transactions/s", }, "com_writes_per_second": { "label": "INSERT rate", "options": options, "status": status, "error_message": msg, "unit": "transactions/s", }, "com_updates_per_second": { "label": "UPDATE rate", "options": options, "status": status, "error_message": msg, "unit": "transactions/s", }, "com_deletes_per_second": { "label": "DELETE rate", "options": options, "status": status, "error_message": msg, "unit": "transactions/s", }, # replication "slave.running": { "label": "MySQL Slave server is replicating", "options": options, "status": status, "error_message": msg, }, "slave.io_running": { "label": "MySQL Slave server is connected to the Master", "options": options, "status": status, "error_message": msg, }, "slave.latency": { "label": "MySQL Slave server latency (seconds)", "options": options, "status": status, "error_message": msg, "unit": "seconds", }, "extended_metric.innodb_row_lock_current_waits": { "label": "Innodb current row lock waits", "options": options, "status": status, "error_message": msg, "unit": "waits", }, "extended_metric.innodb_row_lock_time_avg": { "label": 
"Innodb row lock time avg", "options": options, "status": status, "error_message": msg, "unit": "ms", }, "extended_metric.threads_connected": { "label": "Threads connected", "options": options, "status": status, "error_message": msg, "unit": "threads", }, "extended_metric.threads_created": { "label": "Threads Created", "options": options, "status": status, "error_message": msg, "unit": "threads", }, "extended_metric.threads_running": { "label": "Threads Running", "options": options, "status": status, "error_message": msg, "unit": "threads", }, "extended_metric.questions": { "label": "MySQL Questions - count of statements executed from the client", "options": options, "status": status, "error_message": msg, "unit": "questions", }, "extended_metric.innodb_buffer_pool_pages_total": { "label": "Total pages in the buffer pool", "options": options, "status": status, "error_message": msg, "unit": "pages", }, "extended_metric.innodb_buffer_pool_read_requests": { "label": "Requests made to the buffer pool", "options": options, "status": status, "error_message": msg, "unit": "requests/s", }, "extended_metric.innodb_buffer_pool_reads": { "label": "Requests unfulfilled by the buffer pool", "options": options, "status": status, "error_message": msg, "unit": "reads/s", }, } extended_metrics = [] for cfg in instance_configs: if "extended_metrics" in cfg: extended_metrics = [ m.strip().lower() for m in cfg["extended_metrics"].split(",") ] for m in extended_metrics: data["extended_metric.%s" % m] = { "label": "MySQL %s" % m.replace("_", " "), "options": options, "status": status, "error_message": msg, } # galera cluster metrics if status == agent_util.SUPPORTED: galera_instance_configs = [] for cfg in instance_configs: if "wsrep" in str( execute_query(cfg, "SHOW GLOBAL STATUS LIKE 'wsrep%'") ): galera_instance_configs.append(cfg) if len(galera_instance_configs) > 0: galera_options = metadata_options(galera_instance_configs) data["galera_cluster_size"] = { "label": "Galera Cluster 
Size", "options": galera_options, "status": status, "error_message": msg, } data["galera_local_send_queue_avg"] = { "label": "Galera Send Queue Length", "options": galera_options, "status": status, "error_message": msg, } data["galera_local_rcv_queue_avg"] = { "label": "Galera Average size of Local Received Queue", "options": galera_options, "status": status, "error_message": msg, } return data @classmethod def get_metadata_docker(self, container, config): if "host" not in config: try: ip = agent_util.get_container_ip(container) config["host"] = ip except Exception: self.log.exception("get_metadata_docker error") return self.get_metadata(config) def check(self, textkey, data, config): instance_configs = parse_instance_configs(config) instance_config = get_instance_config(data, instance_configs) return self.check_instance(textkey, data, instance_config) def check_instance(self, textkey, resource, config): if textkey == "queries_per_second": col, res = execute_query( config, "SHOW GLOBAL STATUS where Variable_name='Queries';" ) if not res: return None curr_reqs = float(res[1]) delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs) if not previous_reqs or curr_reqs < previous_reqs: return None return int((curr_reqs - previous_reqs) / float(delta)) if textkey == "slow_queries_per_second": col, res = execute_query( config, "SHOW GLOBAL STATUS where Variable_name='Slow_queries';" ) if not res: return None curr_reqs = float(res[1]) delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs) if previous_reqs is None or curr_reqs < previous_reqs: return None return (curr_reqs - previous_reqs) / float(delta) if textkey == "slow_queries_per_minute": col, res = execute_query( config, "SHOW GLOBAL STATUS where Variable_name='Slow_queries';" ) if not res: return None curr_reqs = float(res[1]) delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs) if previous_reqs is None or curr_reqs < previous_reqs: return None return 
(curr_reqs - previous_reqs) / float(delta / 60.0) if textkey in ( "com_selects_per_second", "com_writes_per_second", "com_updates_per_second", "com_deletes_per_second", ): column_name = READ_COMMANDS.get(textkey) _, data = execute_query( config, "SHOW GLOBAL STATUS WHERE Variable_name='%s';" % column_name ) if not data: return None curr_data = float(data[1]) delta, previous_data = self.get_mysql_cache(resource, textkey, curr_data) if previous_data is None or curr_data < previous_data: return None return (curr_data - previous_data) / float(delta) if "query_cache" in textkey: res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'Qcache_free_memory';") if not res: return None row = res[1] free = int(row[1]) self.log.debug("cache_free_memory: %d" % free) res = execute_query(config, "SHOW VARIABLES LIKE 'query_cache_size';") row = res[1] total = int(row[1]) self.log.debug("query_cache_size: %d" % total) if "percent_free" in textkey: if not total: return 0 return_val = int(float(free) / total * 100) self.log.debug("Percent free: %d" % return_val) return return_val else: return free elif textkey == "connections": res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'Threads_connected';") if not res: return None row = res[1] self.log.debug("Threads connected: %s" % str(row[1])) return int(row[1]) # galera if textkey == "galera_cluster_size": res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';") if not res: return None row = res[1] free = int(row[1]) self.log.debug("galera_cluster_size: %d" % free) return int(row[1]) if textkey == "galera_local_send_queue_avg": res = execute_query( config, "SHOW GLOBAL STATUS LIKE 'wsrep_local_send_queue';" ) if not res: return None row = res[1] free = int(row[1]) self.log.debug("galera_local_send_queue_avg: %d" % free) return int(row[1]) if textkey == "galera_local_rcv_queue_avg": res = execute_query( config, "SHOW GLOBAL STATUS LIKE 'wsrep_local_recv_queue';" ) if not res: return None row = res[1] free = int(row[1]) 
# --- tail of the MySQL plugin's check() method (its def starts above this chunk) ---
            # Galera receive-queue branch: log the computed average, return the raw value.
            self.log.debug("galera_local_rcv_queue_avg: %d" % free)
            return int(row[1])

        # extended_metrics
        elif "extended_metric" in textkey:
            # Strip the namespace prefix to recover the raw MySQL status variable name.
            metric_name = textkey.replace("extended_metric.", "")
            res = execute_query(
                config, "SHOW GLOBAL STATUS LIKE '" + metric_name + "';"
            )
            try:
                row = res[1]
                result = int(row[1])
            except:
                # NOTE(review): bare except silently maps any failure (missing
                # variable, non-numeric value) to 0 — consider logging here.
                result = 0
            # Buffer-pool counters are cumulative, so convert them into a
            # per-second rate against the previously cached sample.
            if metric_name in (
                "innodb_buffer_pool_read_requests",
                "innodb_buffer_pool_reads",
            ):
                curr_data = result
                delta, previous_data = self.get_mysql_cache(
                    resource, textkey, curr_data
                )
                if previous_data is None or curr_data < previous_data:
                    # First sample, or counter went backwards (server restart).
                    result = None
                else:
                    result = (curr_data - previous_data) / float(delta)
            self.log.debug(
                "%s: %s"
                % (textkey.replace("extended_metric.", "").title(), str(result))
            )
            return result

        # replication
        # Fallthrough: any remaining textkey suffix is treated as a replication metric.
        action = textkey.split(".")[-1]
        # Map textkey suffix -> (SHOW SLAVE STATUS column, value normalizer).
        query_map = {
            "running": ("Slave_SQL_Running", lambda c: int(c == "Yes")),
            "io_running": ("Slave_IO_Running", lambda c: int(c == "Yes")),
            "latency": ("Seconds_Behind_Master", lambda c: int(c)),
        }
        column, fn = query_map[action]
        try:
            keys, values = execute_query(config, "SHOW SLAVE STATUS")
        except:
            # Server is not a replica (or the query is not permitted).
            self.log.info("Replication metrics not available")
            return None
        row = values[keys.index(column)]
        # apparently some mysql modules return an integer
        # and some a string.... normalize it
        row = str(row)
        if action == "latency" and not row:
            # Empty Seconds_Behind_Master means replication is broken; return a
            # sentinel latency instead of None so alerting still fires.
            return 999999
        elif not row:
            return False
        return fn(row)

    def check_docker(self, container, textkey, data, config):
        """Run check() against a MySQL server inside a Docker container.

        Resolves the container's IP (unless 'host' is already present in
        config) and then delegates to the regular check(); a failed lookup is
        logged and check() proceeds with the original config (best effort).
        """
        if "host" not in config:
            try:
                ip = agent_util.get_container_ip(container)
                config["host"] = ip
            except Exception as e:
                self.log.exception(e)
        return self.check(textkey, data, config)

    # USING THIS FOR 'D.R.Y.' and just because we may add cached results of other types in the future
    def get_mysql_cache(self, resource, textkey, curr_reqs):
        """Return (seconds_since_last_sample, previous_value) for a cumulative
        counter keyed by resource/textkey, storing curr_reqs as the new sample.

        Returns (None, None) on the first call for a key, when there is no
        previous sample to diff against.
        """
        cache = self.get_cache_results("mysql:%s:%s" % (resource, textkey), None)
        if not cache:
            self.log.info("Empty mySQL cache! Building for the first time.")
            self.cache_result(
                "mysql:%s:%s" % (resource, textkey), None, curr_reqs, replace=True
            )
            return None, None
        delta, previous_reqs = cache[0]
        # Refresh the cache so the next call diffs against this sample.
        self.cache_result(
            "mysql:%s:%s" % (resource, textkey), None, curr_reqs, replace=True
        )
        return delta, float(previous_reqs)
# --- tar member boundary (plugins.tar header): next file begins here ---
fortisase_connection.py000064400000015767151700142040011346 0ustar00
import os
import logging
from ipaddress import IPv4Address, IPv6Address
from configparser import ConfigParser, NoOptionError
import sys
import json
import time
import re

import agent_util

# FortiClient endpoint-control log on macOS; scanned backwards for the public IP.
EPCTRL_LOG = "/Library/Application Support/Fortinet/FortiClient/Logs/epctrl.log"


class FortisaseVPNConnection:
    """Parses FortiClient VPN state (tunnel name, connection state, tunnel
    address, public IP) from the files FortiClient writes on macOS."""

    def __init__(self):
        # Last parsed state; populated by parse_connection().
        self.connection_state = None
        self.connection_name = ""
        self.connected_address = None
        # Tunnel names (substring match) that count as a valid SIA tunnel.
        self.valid_tunnels = [
            "Secure Internet Access",
            "FortiSASE Cloud Security",
        ]
        self.log = logging.getLogger("fortisase")

    def parse_connection(self, data):
        """
        Parse the JSON VPN-status payload and record the first connected
        tunnel whose name matches one of self.valid_tunnels.

        Sets connection_state / connection_name / connected_address as a side
        effect; returns True iff a matching connected tunnel was found.
        """
        connections = json.loads(data)
        for connection in connections:
            tunnel_name = connection.get("tunnel_name")
            connected = bool(connection.get("connected"))
            ip = connection.get("ip_address")
            if any([i in tunnel_name for i in self.valid_tunnels]) and connected:
                # Tunnel is valid
                self.connection_state = "Connected"
                self.connection_name = tunnel_name
                # A colon in the address implies IPv6 notation.
                if ":" in ip:
                    self.connected_address = IPv6Address(ip)
                else:
                    self.connected_address = IPv4Address(ip)
                return True
        return False

    def is_sia_tunnel_up(self):
        """
        Return True only if the recorded tunnel name matches a valid SIA
        tunnel and the recorded connection state is "Connected".
        """
        return (
            any([i in self.connection_name for i in self.valid_tunnels])
            and self.connection_state == "Connected"
        )

    def get_public_ip_address(self):
        """
        Parse the epctrl log file from FortiClient backwards (in 5000-byte
        chunks) and return the first successful public_ip_checker hit as an
        IPv4Address. Returns None when the log is missing or has no hit.
        """
        if not os.path.exists(EPCTRL_LOG):
            self.log.warning(f"{EPCTRL_LOG} file not found.")
            return
        start = time.time()
        ip_address = None
        with open(EPCTRL_LOG, "rb") as opened:
            # seek() returns the new offset, i.e. the total file size here.
            file_size = opened.seek(0, os.SEEK_END)
            position = file_size
            while ip_address is None:
                if position == 0:
                    break
                # Step back one chunk (clamped at the start of the file).
                position = max(0, position - 5000)
                opened.seek(position)
                # NOTE(review): a log line straddling a 5000-byte chunk
                # boundary could be missed by the per-chunk regex — confirm
                # whether that matters in practice.
                chunk = opened.read(min(5000, file_size - position))
                ip_address = self._parse_log_chunk(chunk.decode())
        end = time.time()
        self.log.info(
            f"Parsed {EPCTRL_LOG} in {end - start} seconds. Found: {ip_address}"
        )
        return ip_address

    def _parse_log_chunk(self, chunk: str):
        """Return the first ipify-reported IP in chunk as an IPv4Address,
        or None if no line matches / the captured text is not a valid IP."""
        for line in chunk.split("\n"):
            match = re.search(
                r"^.*\spublic_ip_checker:\d*\sGot public IP from ipify:\s(.*)$", line
            )
            if match:
                try:
                    return IPv4Address(match.groups()[0])
                except Exception as err:
                    self.log.warning(
                        f"Unable to parse ipadress {match.groups()}. Err {err}"
                    )


class FortiSaseConnection(agent_util.Plugin):
    """Agent plugin exposing FortiSASE VPN connection metrics on macOS."""

    textkey = "fortisase"
    label = "FortiSase"
    log = logging.getLogger("fortisase")
    # Default config file for Agent Config for FortiSase installations.
    # We use it to determine the installation type. If file is not present, is not valid
    config_file = "/usr/local/FortiMonitor/agent/config/fm-agent/fm_agent.cfg"

    @classmethod
    def get_metadata(cls, config):
        """Advertise this plugin's metrics.

        Returns {} (plugin disabled) when the platform is not macOS, the agent
        config file is missing, or the handshake_type is not 'forticlient'.
        """
        status = agent_util.SUPPORTED
        msg = None
        # Plugin only set to work on OSX for now.
        if sys.platform.lower() != "darwin":
            return {}
        # Agent needs to be a FortiSase installation to work as well.
        if not os.path.exists(cls.config_file):
            cls.log.info(
                "Agent config file not found. Unable to determine handshake type"
            )
            return {}
        config_reader = ConfigParser()
        config_reader.read(cls.config_file)
        try:
            is_fortisase_install = (
                config_reader.get("agent", "handshake_type").lower() == "forticlient"
            )
        except NoOptionError:
            # Option absent: treat as a non-FortiSase installation.
            is_fortisase_install = False
        if not is_fortisase_install:
            status = agent_util.UNSUPPORTED
            msg = "Agent installation is not FortiSase"
        if status == agent_util.UNSUPPORTED:
            cls.log.info(f"Fortisase connection plugin disabled. {msg}")
            return {}
        metadata = {
            "osx.connected_sia": {
                "label": "Connected SIA",
                "options": None,
                "status": status,
                "error_msg": msg,
                "unit": "bool",
            },
            "osx.turbo_ip": {
                "label": "Turbo IP",
                "options": None,
                "status": status,
                "error_msg": msg,
            },
            "osx.public_ip": {
                "label": "Endpoint Public Ip",
                "options": None,
                "status": status,
                "error_msg": msg,
            },
        }
        return metadata

    def check(self, textkey, data, config):
        """Collect one metric identified by textkey.

        Returns 1/0 for osx.connected_sia; for osx.turbo_ip / osx.public_ip
        returns the address converted to its integer form (as float); returns
        None when the value is unavailable or an error occurs.
        """
        try:
            # This file configuration is only available on FortiClient 7.4.4 and above.
            vpn_data = "/Library/Application Support/Fortinet/FortiClient/data/vpn_status_info.json"
            vpn_data_exists = os.path.exists(vpn_data)
            connected, metric = None, None
            client = FortisaseVPNConnection()
            if vpn_data_exists:
                with open(vpn_data, "r") as opened:
                    data = opened.read()
                    connected = client.parse_connection(data)
            else:
                self.log.warning(
                    f"{vpn_data} file does not exist. Unable to fully collect data"
                )
            if textkey == "osx.connected_sia":
                # Grab the fortitray connection value from the log file.
                connected = client.is_sia_tunnel_up()
                if connected:
                    return 1
                else:
                    return 0
            elif textkey == "osx.turbo_ip":
                connected = client.is_sia_tunnel_up()
                if not connected:
                    # If the connection is not detected we don't need to check the turbo ip.
                    return
                ip_address = client.connected_address
                if ip_address:
                    # int() of an IPv4Address/IPv6Address yields its numeric value.
                    metric = int(ip_address)
            elif textkey == "osx.public_ip":
                public_ip = client.get_public_ip_address()
                if public_ip:
                    metric = int(public_ip)
            if metric:
                return float(metric)
        except Exception as msg:
            self.log.warning(f"Unable to process the FortiSase files. Error: {msg}")
            return
/home/emeraadmin/www/4d695/../Service/../4d695/plugins.tar