Wednesday, December 11, 2013

SPEC RPM template

A good template to start:

# Minimal RPM spec template for packaging a script, its config file and
# a cron job.  Build with: rpmbuild -ba demoproject.spec
Name: demoproject
Version: 0.1
Release: 1%{?dist}
Summary: Demo script for doing something cool
Group: DemoGroup
License: GPL
Source0: demoproject-0.1.tar.gz
# Unique scratch build root (legacy style; newer rpmbuild manages this itself)
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
%description
Demo project that does something interesting
%prep
# Unpack Source0 quietly
%setup -q
%build
# Nothing to compile: plain scripts
%install
install --directory $RPM_BUILD_ROOT/usr/sbin
install --directory $RPM_BUILD_ROOT/etc
install --directory $RPM_BUILD_ROOT/etc/cron.d/
install -m 0755 demoscript $RPM_BUILD_ROOT/usr/sbin
# Config and cron entries only need to be readable, never executable
# (the original 0744 set the owner-execute bit on plain data files).
install -m 0644 demoscript.conf $RPM_BUILD_ROOT/etc
install -m 0644 cron/democronscript $RPM_BUILD_ROOT/etc/cron.d/
%clean
rm -rf $RPM_BUILD_ROOT
%files
/usr/sbin/demoscript
# noreplace: keep the admin's edited copy on upgrade (new file lands as .rpmnew)
%config(noreplace) /etc/demoscript.conf
/etc/cron.d/democronscript
%changelog
view raw demo.spec hosted with ❤ by GitHub
:)

Tuesday, October 15, 2013

watching queries per second (vmstat style) on MySQL

Good trick!

# Show per-second deltas of the MySQL status counters (vmstat style):
# "ext" = extended-status, -r = relative (delta) values, -i 1 = repeat every second.
# The grep keeps the *queries* counters plus the '-' table-border lines that
# separate one sample from the next.
mysqladmin ext -ri 1 |grep -i -e queries -e \-
view raw vmsqlstat.sh hosted with ❤ by GitHub
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Qcache_queries_in_cache | 5282 |
| Queries | 669070336 |
| Slow_queries | 2950563 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Innodb_data_pending_fsyncs | -1 |
| Qcache_free_memory | -2440 |
| Qcache_queries_in_cache | 1 |
| Queries | 211 |
| Slow_queries | 0 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Qcache_queries_in_cache | 0 |
| Queries | 177 |
| Slow_queries | 0 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Innodb_data_pending_fsyncs | -1 |
| Innodb_os_log_pending_fsyncs | -1 |
| Qcache_queries_in_cache | 0 |
| Queries | 133 |
| Slow_queries | 0 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Open_files | -8 |
| Qcache_queries_in_cache | 0 |
| Queries | 687 |
| Slow_queries | 0 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
| Open_files | -2 |
| Qcache_queries_in_cache | -13 |
| Qcache_total_blocks | -12 |
| Queries | 544 |
| Slow_queries | 0 |
+-----------------------------------+--------------+
+-----------------------------------+--------------+
+-----------------------------------+--------------+
view raw zoutput hosted with ❤ by GitHub
:D

Wednesday, October 9, 2013

Compiling a .cpp source with OpenCV libs in MacOSX/Ubuntu 13.04

Yes! The way to compile and run the .cpp source differs between operating systems:
# MacOS X 10.7.x
# Requires OpenCV to be installed (via MacPorts); pkg-config supplies the
# compiler and linker flags for the installed OpenCV.
g++ -ggdb `pkg-config --cflags opencv` `pkg-config --libs opencv` header.cpp header2.cpp program.cpp -o program
view raw macosx hosted with ❤ by GitHub
# Ubuntu 13.04 / Mint 15
# OpenCV built from source, because the packaged version does not ship the
# nonfree modules (SURF/SIFT) separately, and you can hit problems otherwise.
# pkg-config must be installed too; note the flags go after the source file here.
g++ -o program program.cpp `pkg-config opencv --cflags --libs`
view raw ubuntu hosted with ❤ by GitHub
;)

Tuesday, October 1, 2013

massive-kill for mysql sleep connections (concept)

Mmmm.... It is not a good trick.... but it could be useful: https://gist.github.com/vicendominguez/8820342
# Quick-and-dirty hack: kill every MySQL connection in "Sleep" state whose
# sleep time starts with an 8 followed by more digits (i.e. 8x, 8xx... seconds).
# The thread id is taken from column 1 of `show processlist` via awk, then fed
# back to mysql as a KILL statement, one invocation per connection.
for a in `echo "show processlist" |mysql --user=root --password=tete coredb |egrep 'Sleep[[:space:]]+8[[:digit:]]+' |awk '{ print $1 }'`; do echo "KILL $a" | mysql --user=root --password=tete coredb; done
view raw gistfile1.sh hosted with ❤ by GitHub
Update: Perhaps this is a better way (Mysql procedure): https://gist.github.com/datacharmer/5946490
-- SQL
-- Maintenance procedures + events that periodically KILL idle and
-- long-running MySQL connections (from gist.github.com/datacharmer/5946490).
-- NOTE(review): the events only fire when the event scheduler is enabled
-- (event_scheduler=ON) -- confirm on the target server.
drop procedure if exists purge_slow_queries;
drop procedure if exists purge_idle_connections;
drop event if exists auto_purge_slow_queries;
drop event if exists auto_purge_idle_connections;
delimiter //
-- Kill every connection that has been in 'Sleep' state longer than the threshold.
create procedure purge_idle_connections()
deterministic
begin
declare done boolean default false;
-- Threshold in seconds; override by setting @max_kill_time before the call.
declare max_time int default coalesce(@max_kill_time, 200);
declare pid bigint;
-- Cursor over the ids of connections currently idle past the threshold.
declare c cursor for
SELECT id
FROM information_schema.processlist
WHERE command in ('Sleep')
AND time > max_time;
-- Fired when the cursor is exhausted: flips the loop-exit flag.
declare continue handler for not found
set done = true;
open c;
-- KILL is run as a prepared statement so the id can be supplied as a parameter.
set @q_kill = 'KILL ?';
prepare q_kill from @q_kill;
PURGELOOP: loop
fetch c into pid;
if done then
leave PURGELOOP;
end if;
set @pid = pid;
execute q_kill using @pid;
end loop;
deallocate prepare q_kill;
end//
-- Same shape as above, but targets statements stuck in the 'executing' state.
create procedure purge_slow_queries()
deterministic
begin
declare done boolean default false;
declare max_time int default coalesce(@max_kill_time, 200);
declare pid bigint;
declare c cursor for
SELECT id
FROM information_schema.processlist
WHERE state in ('executing')
AND time > max_time;
declare continue handler for not found
set done = true;
open c;
set @q_kill = 'KILL ?';
prepare q_kill from @q_kill;
PURGELOOP: loop
fetch c into pid;
if done then
leave PURGELOOP;
end if;
set @pid = pid;
execute q_kill using @pid;
end loop;
deallocate prepare q_kill;
end//
delimiter ;
-- Schedule both purges to run every 10 seconds via the event scheduler.
create event auto_purge_idle_connections
on schedule every 10 second
do call purge_idle_connections();
create event auto_purge_slow_queries
on schedule every 10 second
do call purge_slow_queries();
:S

Thursday, September 26, 2013

Dokuwiki + Nginx config file (CentOS 6.4)

It works!
# DokuWiki behind Nginx (CentOS 6.4); PHP is handed off to FastCGI on :9000.
server {
server_name dokuwiki.domain.com;
root /var/www/dokuwiki;
location / {
index doku.php;
# Fall through to the rewrite rules when the file does not exist on disk.
try_files $uri $uri/ @dokuwiki;
}
# DokuWiki keeps configuration and wiki data here; never serve them directly.
location ^~ /conf/ { return 403; }
location ^~ /data/ { return 403; }
location @dokuwiki {
# Map DokuWiki's "nice" URLs onto the real PHP entry points.
rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
rewrite ^/(.*) /doku.php?id=$1 last;
}
location ~ \.php$ {
# Refuse requests for PHP files that do not exist instead of passing them on.
if (!-f $request_filename) { return 404; }
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass 127.0.0.1:9000;
}
}
view raw dokuwiki.conf hosted with ❤ by GitHub
:)

Thursday, July 25, 2013

Trick to compile GNURadio in MacOS X

Yeeah! this is the key....
# You need macports installed.
# This is the magic command line: build GNU Radio with the +full variant,
# forcing gcc as the compiler (presumably the default compiler fails here).
port install gnuradio +full configure.compiler=gcc
;)

Friday, July 19, 2013

Discovering the real number of CPUs in the host server from inside of a VZ container

While researching ffmpeg encoding in a VZ container, I discovered that it is possible to get the real number of CPUs from inside a VZ container. I had to use different flags at compilation time, but it works (at least in Proxmox):
/* numcpu.c code is a part of FFMPEG source (libavutil/) ;)
You can play with the definitions in compilation time. Very interesting.
[root@host:~# grep -c processor /proc/cpuinfo
24
[root@build /]# grep -c processor /proc/cpuinfo
1
We have one openvz container with 1 CPU in one server with 24 cpus (cores: 2x cpu 6xcore 2x ht):
[root@build /]# gcc -D_GNU_SOURCE -DHAVE_SYSCTL -DHAVE_SYSCONF -Wall numcpu.c -o numcpu
[root@build /]# ./numcpu
_SC_NPROCESSORS_ONLN CPU num: 1
[root@build /]#
And pay attention:
[root@build /]# gcc -D_GNU_SOURCE -DHAVE_SCHED_GETAFFINITY -Wall numcpu.c -o numcpu
[root@build /]# ./numcpu
HAVE_SCHED_GETAFFINITY CPU num: 24
So it looks like the auto-discovering of CPUs inside of openvz container using the HAVE_SCHED_GETAFFINITY macros (and the sched_setaffinity function) is not ok (or it is very very ok ;)). Be careful. If you are using this method to get resources it could be a bad idea.
*/
#include <stdio.h>
#include <stdlib.h>
#include <features.h>
#include <sched.h>
#if HAVE_SCHED_GETAFFINITY
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <sched.h>
#endif
#if HAVE_GETPROCESSAFFINITYMASK
#include <windows.h>
#endif
#if HAVE_SYSCTL
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#endif
#if HAVE_SYSCONF
#include <unistd.h>
#endif
/* Count the CPUs available to this process, using whichever probe was
 * selected at compile time via -DHAVE_... flags (code taken from FFmpeg's
 * libavutil).  Prints the name of the probe used, then returns the count
 * (0 if no probe was compiled in or the probe failed). */
int av_cpu_count(void)
{
int nb_cpus = 0;
#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
/* Linux: population of the process' CPU affinity mask.  Inside an OpenVZ
 * container this reports the HOST's CPUs (see the note above). */
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
nb_cpus = CPU_COUNT(&cpuset);
printf ("HAVE_SCHED_GETAFFINITY ");
#elif HAVE_GETPROCESSAFFINITYMASK
/* Windows: population count of the process affinity mask. */
DWORD_PTR proc_aff, sys_aff;
if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
nb_cpus = av_popcount64(proc_aff);
printf ("HAVE_GETPROCESSAFFINITYMASK ");
#elif HAVE_SYSCTL && defined(HW_NCPU)
/* BSD/macOS: ask the kernel via sysctl; reset to 0 on failure. */
int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus);
if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
nb_cpus = 0;
printf ("HW_NCPU ");
#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
/* sysconf variant used on some Unices. */
nb_cpus = sysconf(_SC_NPROC_ONLN);
printf ("_SC_NPROC_ONLN ");
#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
/* POSIX sysconf: number of processors currently online. */
nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
printf ("_SC_NPROCESSORS_ONLN ");
#endif
return nb_cpus;
}
/* Entry point: run the compiled-in CPU probe and print the detected count. */
int main(int argc, char *argv[])
{
    int detected_cpus = av_cpu_count();
    printf ("CPU num: %d \n", detected_cpus);
    exit(EXIT_SUCCESS);
}
view raw numcpu.c hosted with ❤ by GitHub
:O

Thursday, June 13, 2013

Websockets + Nodejs + Nginx

The latest version of Nginx supports HTTP 1.1 with bidirectional WebSockets, so it is possible to use it with Node.js (for example).

A spanish intro in this post: http://www.securityartwork.es/2013/06/13/abstrayendo-websockets-ssl/

Here, my minimal-snippets: One plain and one for SSL:


# Plain-HTTP WebSocket proxy: Nginx terminates the client connection and
# forwards it to the Node.js backend.
upstream websockets_nodejs {
server backend:9090;
}
server {
listen 80;
server_name sock.midominio.es;
root /usr/local/app/sock/app/webroot;
keepalive_timeout 512;
location / {
proxy_pass http://websockets_nodejs;
proxy_redirect off;
# WebSockets require HTTP/1.1 plus the Upgrade/Connection handshake headers.
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
# TLS variant: Nginx terminates TLS and proxies WebSocket traffic to the
# Node.js backend.
upstream websockets_nodejs {
server backend:9090;
}
server {
listen 443;
server_name sckts.midominio.es;
root /usr/local/app/sock/app/webroot;
index index.php;
keepalive_timeout 512;
ssl on;
ssl_certificate /etc/nginx/server.crt;
ssl_certificate_key /etc/nginx/server.key;
ssl_session_timeout 5m;
# Fix: SSLv2 and SSLv3 are broken (DROWN/POODLE) -- allow only TLS.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
# Fix: the original cipher string explicitly enabled export/LOW/SSLv2
# ciphers; exclude weak and anonymous suites instead.
ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!LOW:!MD5;
ssl_prefer_server_ciphers on;
location / {
proxy_pass http://websockets_nodejs;
proxy_redirect off;
# WebSockets require HTTP/1.1 plus the Upgrade/Connection handshake headers.
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
;)

Tuesday, May 14, 2013

Searching full anonymous proxies

I have created a PoC to try to find fully anonymous proxies. This script discovers whether a proxy fully replaces your IP and keeps you anonymous. It works as an NSE script for NMAP.

I wrote one post about it in Spanish here: http://www.securityartwork.es/2013/05/14/a-vueltas-con-la-deteccion-de-proxys/

The PoC source code here: https://github.com/vicendominguez/http-open-proxy-anon

:)

Vte

Monday, April 15, 2013

Horrible script to show me the headers in the webserver to get the correct-real client ip

Horrible script to show me the headers in the webserver to get the correct-real client ip:


:|

Wednesday, March 6, 2013

dump xlsx in terminal and with "grep" tool you can search words

I worked in a company with the network inventory in a Excel. It was useful to search names and ips from linux terminal.

It dumps the xlsx file to the console, and I was using "grep" to search for words, IPs, or whatever you want. Maybe you can get something else out of it:
#!/usr/bin/env perl
# Dump every sheet of an .xlsx workbook to stdout, so the output can be
# searched from the terminal with "grep".
use strict;
use warnings;
use Text::Iconv;
# Text::Iconv is not strictly required: Spreadsheet::XLSX accepts any
# object with a convert() method, or nothing at all.
my $converter = Text::Iconv->new("utf-8", "windows-1251");
use Spreadsheet::XLSX;
my $workbook = Spreadsheet::XLSX->new('/systems/ipaddr.xlsx', $converter);
for my $worksheet (@{ $workbook->{Worksheet} }) {
    printf("Sheet: %s\n", $worksheet->{Name});
    $worksheet->{MaxRow} ||= $worksheet->{MinRow};
    for my $r ($worksheet->{MinRow} .. $worksheet->{MaxRow}) {
        $worksheet->{MaxCol} ||= $worksheet->{MinCol};
        printf "\n";
        for my $c ($worksheet->{MinCol} .. $worksheet->{MaxCol}) {
            my $cell = $worksheet->{Cells}[$r][$c];
            # Print each non-empty cell value followed by a space.
            printf("%s ", $cell->{Val}) if $cell;
        }
    }
}
view raw dumpxlsx.pl hosted with ❤ by GitHub
:)

Monday, February 18, 2013

Migrating compatible fork'ed Nagios platforms....

Maybe this script will be useful to you. You can create a new one starting from here. It has two parts:
#!/usr/bin/perl
##################
## CSV Format ##
## Delimiter ; ##
##################
######################################################
### ###
### name;ip_contador ###
### ###
######################################################
# you should change the template in the end of this script
# vdominguez 2012
# Reads a "name;address" CSV (path given as the first argument) and prints
# a Nagios host definition for every valid row.
use strict;
use warnings;
my $file = $ARGV[0] or die "Need to get CSV file on the command line\n";
open(my $data, '<', $file) or die "Could not open '$file' $!\n";
while (my $line = <$data>) {
chomp $line;
my @fields = split ";" , $line;
# Skip rows that do not have both a name and an address.
if ($fields[0] && $fields[1]) {
my $hosts = create_host_in_file ($fields[0],$fields[1]);
print $hosts;
}
}
# Build one Nagios "define host" stanza for the given name/address pair.
sub create_host_in_file {
my $host_name=shift;
my $host_address=shift;
# NOTE(review): this snippet appears truncated by the blog formatting --
# the heredoc's closing END marker and the sub's trailing
# "return $template; }" are missing.  Restore them before using it.
my $template = <<END;
define host {
host_name $host_name
alias $host_name
address $host_address
use HostGeneric
# check_command process-service-perfdata
# event_handler process-service-perfdata
# notification_period none
# check_command process-service-perfdata
# event_handler process-service-perfdata
# notification_period none
# contact_groups sistemasdist-Grupo
}
#!/usr/bin/perl -w
#############################################
# Convert host.cfg from nagios in a CSV? #
# Debian requires .deb:libnagios-object-perl#
#############################################
# vdominguez 2012
# Dump the hosts from a Nagios host.cfg as "host_name;address" CSV lines.
# Usage: script host.cfg [-d]   (-d dumps the parsed object tree instead)
use strict;
use lib qw( ./lib ../lib);
use Nagios::Config;
use Nagios::Object::Config;
use Data::Dumper;
Nagios::Object::Config->strict_mode(1);
my $file = $ARGV[0] or die "Need a host.cfg file\n";
my $obj = Nagios::Object::Config->new();
$obj->parse($file);
# Fix: guard against a missing second argument -- the original matched
# $ARGV[1] unconditionally, warning on an uninitialized value under -w.
if (defined $ARGV[1] && $ARGV[1] =~ m/-d/) {
print Dumper($obj), "\n";
} else {
my $hosts = $obj->all_objects_for_type("Nagios::Host");
if (scalar(@$hosts) == 0) {
# print "No hosts have yet been defined\n";
} else {
foreach my $host (@$hosts) {
# Fix: use print, not printf -- a '%' in a host name would otherwise be
# interpreted as a format specification.
print $host->host_name . ";" . $host->address . "\n";
}
}
}
:D

Wednesday, January 16, 2013

"Automagic" mount (at boot time) of a windows folder in FreeBSD 7

Mounting a shared folder at boot time looks easy... but you need to know the trick.

From my gist: https://gist.github.com/vicendominguez/8806533
"Automagic" mount (at boot time) of a windows folder in FreeBSD 7!
------------------------------------------------------------------
@vicendominguez
* Environment:
- shared folder in windows 2003 with user/pass
- FreeBSD 7.00
* Pre:
- smbclient to check the permissions
- mount_smbfs should be available. It will load the correct .ko in the kernel.
* First Checks:
- check with smbclient if you can access to the shared folder with the user/pass (smbclient implements CIFS directly without kernel or fs layer)
* Procedure:
- Create a /etc/hosts entry with the windows name of the box and his IP. By example:
10.0.0.14 sbsserver
- Create line in the fstab file (/etc/fstab) like:
//user@sbsserver/dir /mnt/windowslocal smbfs rw 0 0
- user: windows user
- dir: it is the windows shared folder
- Create credentials in /etc/nsmb.conf like:
[SBSSERVER:USER]
password=mypass
========= STOP HERE - VERY IMPORTANT!!!! ==================
In the /etc/nsmb.conf the CAPS is a MUST! hostname and user MUST be in CAPS!
===========================================================
- "mount /mnt/windowslocal" should work without asking for the password.
- If everything is ok, when the system is booting, it should mount the shared folder from windows.
view raw gistfile1.txt hosted with ❤ by GitHub
:)

Friday, January 4, 2013

Installing Navisphere CLI on CentOS 6.2

Yes! you need to find the NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64.rpm file. And yes, it is not easy. Google is your friend.

Installing Navisphere CLI on CentOS 6.2
--------------------------------------------------
twitter: @vicendominguez
* If you have this problem with dependencies:
# rpm -ivh NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64.rpm
error: Failed dependencies:
libc.so.6 is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libc.so.6(GLIBC_2.0) is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libc.so.6(GLIBC_2.1) is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libc.so.6(GLIBC_2.3) is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libdl.so.2 is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libdl.so.2(GLIBC_2.0) is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
libdl.so.2(GLIBC_2.1) is needed by NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64
Try:
# yum install glibc-utils.x86_64 compat-glibc-headers.x86_64 glibc.i686
And:
# rpm -ivh NaviCLI-Linux-64-x86-en_US-7.31.25.1.24-1.x86_64.rpm
Preparing... ########################################### [100%]
1:NaviCLI-Linux-64-x86-en########################################### [100%]
Run the script /opt/Navisphere/bin/setlevel_cli.sh to set the security level before you proceed.
#
view raw gistfile1.txt hosted with ❤ by GitHub
;)

Wednesday, January 2, 2013

Commvault backups history from Windows Terminal.

It is tested with CommVault 9 and you will need to install qcommands (it comes with commvault).

The authentication is out of the script (at the moment). Remember to execute 'qlogin' to validate the user/pass first.

' Dirty script to check Failed backups from Commvault 9.00
' Author: Vicente Dominguez - contact via Twitter @vicendominguez ;)
' Require: qlist from qcommands (Commvault 9.00)
' Save as check_commvaulthistory.vbs in server with commvault sw install
' Command line to execute: cscript //nologo check_commvaulthistory.vbs
' Remember to execute 'qlogin' to validate the user/pass
Option Explicit
' On Error Resume Next
' Run an external command and return everything it wrote to stdout.
Function getCommandOutput(theCommand)
    Dim shellObject, runningProcess
    Set shellObject = CreateObject("WScript.Shell")
    Set runningProcess = shellObject.Exec(theCommand)
    getCommandOutput = runningProcess.StdOut.ReadAll
End Function
' Formatting output from qlist command (if qlist output changes... be careful here)
' Parse the output of "qlist client" and return an array holding the names
' of the clients marked as active.  Relies on qlist's fixed-width output
' layout, so a change in that format will break the parsing below.
Function CreateArrayClient()
Dim ClientList(), ClientAux, ClientArray, ClientName, NumList
ClientArray = split (getCommandOutput ("qlist client"), vbNewLine)
NumList = 0
For Each ClientAux in ClientArray
' Dynamic array resize (one slot per line; slots for skipped lines stay empty)
redim preserve ClientList (NumList)
' Output 'qlist' validation, min two chars in name to be ok
If Len (ClientAux) > 2 Then
' The client name is the first space-separated token of the line.
ClientName = Split (ClientAux, " ")
' Client must be "ACTIVE": the flag is read from column 47 onwards --
' NOTE(review): confirm that column against the installed CommVault version.
If InStr (Mid(ClientAux,47), "Yes") Then
ClientList(NumList) = ClientName(0)
NumList = NumList + 1
End If
End If
Next
CreateArrayClient = ClientList
End Function
' Return the full job history for one client, as reported by qlist.
Function GetHostHistory (Hostname)
    GetHostHistory = getCommandOutput("qlist jobhistory -c '" & Hostname & "'")
End Function
' Return 1 when the client has at least one job in Failed state, 0 otherwise.
Function GetHostFailedHistory (Hostname)
    Dim rawOutput
    rawOutput = getCommandOutput("qlist jobhistory -js Failed -c '" & Hostname & "'")
    ' qlist prints this sentinel text when the Failed-jobs query is empty.
    If InStr (rawOutput, "No jobs to display") Then
        GetHostFailedHistory = 0
    Else
        GetHostFailedHistory = 1
    End If
End Function
' Walk every active client: print "<name> OK" when it has no failed jobs,
' otherwise "<name> FAIL:" followed by its complete job history.
Sub BackupAudit ()
    Dim CurrentClient
    For Each CurrentClient in CreateArrayClient
        ' Skip the empty slots left behind by the client-array builder.
        If Len (CurrentClient) > 1 Then
            If GetHostFailedHistory (CurrentClient) Then
                Wscript.Echo CurrentClient & " FAIL: " & GetHostHistory (CurrentClient)
            Else
                Wscript.Echo CurrentClient & " OK "
            End If
        End If
    Next
End Sub
BackupAudit()
:)