<?php
$url='http://edition.cnn.com/?fbid=4OofUbASN5k';
$var = fread_url($url);// function calling to get the page from curl
$search = array('@<script[^>]*?>.*?</script>@si'); // Strip out javascript
$var = preg_replace($search, "\n", html_entity_decode($var)); // Strip out javascript
$linklabel = array();
$link = array();
$dom = new DOMDocument($var);
@$dom->loadHTML($var);
$xpath = new DOMXPath($dom);// Grab the DOM nodes
foreach($xpath->find('a') as $element) {
array_push($linklabel, $element->innerText);
print $linklabel;
array_push($link, $element->href);
print $link.'<br>';
}
function fread_url($url) {
if(function_exists("curl_init")) {
$ch = curl_init();
$user_agent = "Mozilla/4.0 (compatible; MSIE 5.01; ".
"Windows NT 5.0)";
$ch = curl_init();
curl_setopt($ch, CURLOPT_USERAGENT, $user_agent);
curl_setopt( $ch, CURLOPT_HTTPGET, 1 );
curl_setopt( $ch, CURLOPT_RETURNTRANSFER, 1 );
curl_setopt( $ch, CURLOPT_FOLLOWLOCATION , 1 );
curl_setopt( $ch, CURLOPT_FOLLOWLOCATION , 1 );
curl_setopt( $ch, CURLOPT_URL, $url );
curl_setopt ($ch, CURLOPT_COOKIEJAR, 'cookie.txt');
$html = curl_exec($ch);
//print $html;//printing the web page.
curl_close($ch);
}
else {
$hfile = fopen($url,"r");
if($hfile) {
while(!feof($hfile)) {
$html.=fgets($hfile,1024);
}
}
}
return $html;
}
i need to seperate links and link labels into two seperate arrays. i followed several forums and made a code, but is getting error. i don't know about the find function used in the code